1 /*
2 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/debugInfoRec.hpp"
30 #include "code/nativeInst.hpp"
31 #include "code/vtableStubs.hpp"
32 #include "compiler/oopMap.hpp"
33 #include "gc/shared/gcLocker.hpp"
34 #include "gc/shared/barrierSet.hpp"
35 #include "gc/shared/barrierSetAssembler.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "logging/log.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "oops/klass.inline.hpp"
40 #include "prims/methodHandles.hpp"
41 #include "runtime/jniHandles.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/signature.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "runtime/vframeArray.hpp"
47 #include "runtime/vm_version.hpp"
48 #include "utilities/align.hpp"
49 #include "vmreg_x86.inline.hpp"
50 #ifdef COMPILER1
51 #include "c1/c1_Runtime1.hpp"
52 #endif
53 #ifdef COMPILER2
54 #include "opto/runtime.hpp"
55 #endif
56
57 #define __ masm->
58
59 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
60
61 class RegisterSaver {
62 // Capture info about frame layout
63 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
64 enum layout {
65 fpu_state_off = 0,
66 fpu_state_end = fpu_state_off+FPUStateSizeInWords,
67 st0_off, st0H_off,
68 st1_off, st1H_off,
69 st2_off, st2H_off,
70 st3_off, st3H_off,
71 st4_off, st4H_off,
72 st5_off, st5H_off,
73 st6_off, st6H_off,
74 st7_off, st7H_off,
75 xmm_off,
76 DEF_XMM_OFFS(0),
77 DEF_XMM_OFFS(1),
78 DEF_XMM_OFFS(2),
79 DEF_XMM_OFFS(3),
80 DEF_XMM_OFFS(4),
81 DEF_XMM_OFFS(5),
82 DEF_XMM_OFFS(6),
83 DEF_XMM_OFFS(7),
84 flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
85 rdi_off,
86 rsi_off,
87 ignore_off, // extra copy of rbp,
88 rsp_off,
89 rbx_off,
90 rdx_off,
91 rcx_off,
92 rax_off,
93 // The frame sender code expects that rbp will be in the "natural" place and
94 // will override any oopMap setting for it. We must therefore force the layout
95 // so that it agrees with the frame sender code.
96 rbp_off,
97 return_off, // slot for return address
98 reg_save_size };
99 enum { FPU_regs_live = flags_off - fpu_state_end };
100
101 public:
102
103 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
104 int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
105 static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
106
107 static int rax_offset() { return rax_off; }
108 static int rbx_offset() { return rbx_off; }
109
110 // Offsets into the register save area
111 // Used by deoptimization when it is managing result register
112 // values on its own
113
114 static int raxOffset(void) { return rax_off; }
115 static int rdxOffset(void) { return rdx_off; }
116 static int rbxOffset(void) { return rbx_off; }
117 static int xmm0Offset(void) { return xmm0_off; }
118 // This really returns a slot in the fp save area, which one is not important
119 static int fpResultOffset(void) { return st0_off; }
120
121 // During deoptimization only the result register need to be restored
122 // all the other values have already been extracted.
123
124 static void restore_result_registers(MacroAssembler* masm);
125
126 };
127
128 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
129 int* total_frame_words, bool verify_fpu, bool save_vectors) {
130 int num_xmm_regs = XMMRegister::number_of_registers;
131 int ymm_bytes = num_xmm_regs * 16;
132 int zmm_bytes = num_xmm_regs * 32;
133 #ifdef COMPILER2
134 int opmask_state_bytes = KRegister::number_of_registers * 8;
135 if (save_vectors) {
136 assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
137 assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
138 // Save upper half of YMM registers
139 int vect_bytes = ymm_bytes;
140 if (UseAVX > 2) {
141 // Save upper half of ZMM registers as well
142 vect_bytes += zmm_bytes;
143 additional_frame_words += opmask_state_bytes / wordSize;
144 }
145 additional_frame_words += vect_bytes / wordSize;
146 }
147 #else
148 assert(!save_vectors, "vectors are generated only by C2");
149 #endif
150 int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
151 int frame_words = frame_size_in_bytes / wordSize;
152 *total_frame_words = frame_words;
153
154 assert(FPUStateSizeInWords == 27, "update stack layout");
155
156 // save registers, fpu state, and flags
157 // We assume caller has already has return address slot on the stack
158 // We push epb twice in this sequence because we want the real rbp,
159 // to be under the return like a normal enter and we want to use pusha
160 // We push by hand instead of using push.
161 __ enter();
162 __ pusha();
163 __ pushf();
164 __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
165 __ push_FPU_state(); // Save FPU state & init
166
167 if (verify_fpu) {
168 // Some stubs may have non standard FPU control word settings so
169 // only check and reset the value when it required to be the
170 // standard value. The safepoint blob in particular can be used
171 // in methods which are using the 24 bit control word for
172 // optimized float math.
173
174 #ifdef ASSERT
175 // Make sure the control word has the expected value
176 Label ok;
177 __ cmpw(Address(rsp, 0), StubRoutines::x86::fpu_cntrl_wrd_std());
178 __ jccb(Assembler::equal, ok);
179 __ stop("corrupted control word detected");
180 __ bind(ok);
181 #endif
182
183 // Reset the control word to guard against exceptions being unmasked
184 // since fstp_d can cause FPU stack underflow exceptions. Write it
185 // into the on stack copy and then reload that to make sure that the
186 // current and future values are correct.
187 __ movw(Address(rsp, 0), StubRoutines::x86::fpu_cntrl_wrd_std());
188 }
189
190 __ frstor(Address(rsp, 0));
191 if (!verify_fpu) {
192 // Set the control word so that exceptions are masked for the
193 // following code.
194 __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
195 }
196
197 int off = st0_off;
198 int delta = st1_off - off;
199
200 // Save the FPU registers in de-opt-able form
201 for (int n = 0; n < FloatRegister::number_of_registers; n++) {
202 __ fstp_d(Address(rsp, off*wordSize));
203 off += delta;
204 }
205
206 off = xmm0_off;
207 delta = xmm1_off - off;
208 if(UseSSE == 1) {
209 // Save the XMM state
210 for (int n = 0; n < num_xmm_regs; n++) {
211 __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
212 off += delta;
213 }
214 } else if(UseSSE >= 2) {
215 // Save whole 128bit (16 bytes) XMM registers
216 for (int n = 0; n < num_xmm_regs; n++) {
217 __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
218 off += delta;
219 }
220 }
221
222 #ifdef COMPILER2
223 if (save_vectors) {
224 __ subptr(rsp, ymm_bytes);
225 // Save upper half of YMM registers
226 for (int n = 0; n < num_xmm_regs; n++) {
227 __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
228 }
229 if (UseAVX > 2) {
230 __ subptr(rsp, zmm_bytes);
231 // Save upper half of ZMM registers
232 for (int n = 0; n < num_xmm_regs; n++) {
233 __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
234 }
235 __ subptr(rsp, opmask_state_bytes);
236 // Save opmask registers
237 for (int n = 0; n < KRegister::number_of_registers; n++) {
238 __ kmov(Address(rsp, n*8), as_KRegister(n));
239 }
240 }
241 }
242 #else
243 assert(!save_vectors, "vectors are generated only by C2");
244 #endif
245
246 __ vzeroupper();
247
248 // Set an oopmap for the call site. This oopmap will map all
249 // oop-registers and debug-info registers as callee-saved. This
250 // will allow deoptimization at this safepoint to find all possible
251 // debug-info recordings, as well as let GC find all oops.
252
253 OopMapSet *oop_maps = new OopMapSet();
254 OopMap* map = new OopMap( frame_words, 0 );
255
256 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
257 #define NEXTREG(x) (x)->as_VMReg()->next()
258
259 map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
260 map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
261 map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
262 map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
263 // rbp, location is known implicitly, no oopMap
264 map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
265 map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
266
267 // %%% This is really a waste but we'll keep things as they were for now for the upper component
268 off = st0_off;
269 delta = st1_off - off;
270 for (int n = 0; n < FloatRegister::number_of_registers; n++) {
271 FloatRegister freg_name = as_FloatRegister(n);
272 map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
273 map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
274 off += delta;
275 }
276 off = xmm0_off;
277 delta = xmm1_off - off;
278 for (int n = 0; n < num_xmm_regs; n++) {
279 XMMRegister xmm_name = as_XMMRegister(n);
280 map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
281 map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
282 off += delta;
283 }
284 #undef NEXTREG
285 #undef STACK_OFFSET
286
287 return map;
288 }
289
290 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
291 int opmask_state_bytes = 0;
292 int additional_frame_bytes = 0;
293 int num_xmm_regs = XMMRegister::number_of_registers;
294 int ymm_bytes = num_xmm_regs * 16;
295 int zmm_bytes = num_xmm_regs * 32;
296 // Recover XMM & FPU state
297 #ifdef COMPILER2
298 if (restore_vectors) {
299 assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
300 assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
301 // Save upper half of YMM registers
302 additional_frame_bytes = ymm_bytes;
303 if (UseAVX > 2) {
304 // Save upper half of ZMM registers as well
305 additional_frame_bytes += zmm_bytes;
306 opmask_state_bytes = KRegister::number_of_registers * 8;
307 additional_frame_bytes += opmask_state_bytes;
308 }
309 }
310 #else
311 assert(!restore_vectors, "vectors are generated only by C2");
312 #endif
313
314 int off = xmm0_off;
315 int delta = xmm1_off - off;
316
317 __ vzeroupper();
318
319 if (UseSSE == 1) {
320 // Restore XMM registers
321 assert(additional_frame_bytes == 0, "");
322 for (int n = 0; n < num_xmm_regs; n++) {
323 __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
324 off += delta;
325 }
326 } else if (UseSSE >= 2) {
327 // Restore whole 128bit (16 bytes) XMM registers. Do this before restoring YMM and
328 // ZMM because the movdqu instruction zeros the upper part of the XMM register.
329 for (int n = 0; n < num_xmm_regs; n++) {
330 __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
331 off += delta;
332 }
333 }
334
335 if (restore_vectors) {
336 off = additional_frame_bytes - ymm_bytes;
337 // Restore upper half of YMM registers.
338 for (int n = 0; n < num_xmm_regs; n++) {
339 __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16+off));
340 }
341 if (UseAVX > 2) {
342 // Restore upper half of ZMM registers.
343 off = opmask_state_bytes;
344 for (int n = 0; n < num_xmm_regs; n++) {
345 __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32+off));
346 }
347 for (int n = 0; n < KRegister::number_of_registers; n++) {
348 __ kmov(as_KRegister(n), Address(rsp, n*8));
349 }
350 }
351 __ addptr(rsp, additional_frame_bytes);
352 }
353
354 __ pop_FPU_state();
355 __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
356
357 __ popf();
358 __ popa();
359 // Get the rbp, described implicitly by the frame sender code (no oopMap)
360 __ pop(rbp);
361 }
362
363 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
364
365 // Just restore result register. Only used by deoptimization. By
366 // now any callee save register that needs to be restore to a c2
367 // caller of the deoptee has been extracted into the vframeArray
368 // and will be stuffed into the c2i adapter we create for later
369 // restoration so only result registers need to be restored here.
370 //
371
372 __ frstor(Address(rsp, 0)); // Restore fpu state
373
374 // Recover XMM & FPU state
375 if( UseSSE == 1 ) {
376 __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
377 } else if( UseSSE >= 2 ) {
378 __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
379 }
380 __ movptr(rax, Address(rsp, rax_off*wordSize));
381 __ movptr(rdx, Address(rsp, rdx_off*wordSize));
382 // Pop all of the register save are off the stack except the return address
383 __ addptr(rsp, return_off * wordSize);
384 }
385
386 // Is vector's size (in bytes) bigger than a size saved by default?
387 // 16 bytes XMM registers are saved by default using SSE2 movdqu instructions.
388 // Note, MaxVectorSize == 0 with UseSSE < 2 and vectors are not generated.
389 bool SharedRuntime::is_wide_vector(int size) {
390 return size > 16;
391 }
392
393 // The java_calling_convention describes stack locations as ideal slots on
394 // a frame with no abi restrictions. Since we must observe abi restrictions
395 // (like the placement of the register window) the slots must be biased by
396 // the following value.
397 static int reg2offset_in(VMReg r) {
398 // Account for saved rbp, and return address
399 // This should really be in_preserve_stack_slots
400 return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
401 }
402
403 static int reg2offset_out(VMReg r) {
404 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
405 }
406
407 // ---------------------------------------------------------------------------
408 // Read the array of BasicTypes from a signature, and compute where the
409 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
410 // quantities. Values less than SharedInfo::stack0 are registers, those above
411 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer
412 // as framesizes are fixed.
413 // VMRegImpl::stack0 refers to the first slot 0(sp).
414 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.
415 // Register up to Register::number_of_registers are the 32-bit
416 // integer registers.
417
418 // Pass first two oop/int args in registers ECX and EDX.
419 // Pass first two float/double args in registers XMM0 and XMM1.
420 // Doubles have precedence, so if you pass a mix of floats and doubles
421 // the doubles will grab the registers before the floats will.
422
423 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
424 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
425 // units regardless of build. Of course for i486 there is no 64 bit build
426
427
428 // ---------------------------------------------------------------------------
429 // The compiled Java calling convention.
430 // Pass first two oop/int args in registers ECX and EDX.
431 // Pass first two float/double args in registers XMM0 and XMM1.
432 // Doubles have precedence, so if you pass a mix of floats and doubles
433 // the doubles will grab the registers before the floats will.
434 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
435 VMRegPair *regs,
436 int total_args_passed) {
437 uint stack = 0; // Starting stack position for args on stack
438
439
440 // Pass first two oop/int args in registers ECX and EDX.
441 uint reg_arg0 = 9999;
442 uint reg_arg1 = 9999;
443
444 // Pass first two float/double args in registers XMM0 and XMM1.
445 // Doubles have precedence, so if you pass a mix of floats and doubles
446 // the doubles will grab the registers before the floats will.
447 // CNC - TURNED OFF FOR non-SSE.
448 // On Intel we have to round all doubles (and most floats) at
449 // call sites by storing to the stack in any case.
450 // UseSSE=0 ==> Don't Use ==> 9999+0
451 // UseSSE=1 ==> Floats only ==> 9999+1
452 // UseSSE>=2 ==> Floats or doubles ==> 9999+2
453 enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
454 uint fargs = (UseSSE>=2) ? 2 : UseSSE;
455 uint freg_arg0 = 9999+fargs;
456 uint freg_arg1 = 9999+fargs;
457
458 // Pass doubles & longs aligned on the stack. First count stack slots for doubles
459 int i;
460 for( i = 0; i < total_args_passed; i++) {
461 if( sig_bt[i] == T_DOUBLE ) {
462 // first 2 doubles go in registers
463 if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
464 else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
465 else // Else double is passed low on the stack to be aligned.
466 stack += 2;
467 } else if( sig_bt[i] == T_LONG ) {
468 stack += 2;
469 }
470 }
471 int dstack = 0; // Separate counter for placing doubles
472
473 // Now pick where all else goes.
474 for( i = 0; i < total_args_passed; i++) {
475 // From the type and the argument number (count) compute the location
476 switch( sig_bt[i] ) {
477 case T_SHORT:
478 case T_CHAR:
479 case T_BYTE:
480 case T_BOOLEAN:
481 case T_INT:
482 case T_ARRAY:
483 case T_OBJECT:
484 case T_ADDRESS:
485 if( reg_arg0 == 9999 ) {
486 reg_arg0 = i;
487 regs[i].set1(rcx->as_VMReg());
488 } else if( reg_arg1 == 9999 ) {
489 reg_arg1 = i;
490 regs[i].set1(rdx->as_VMReg());
491 } else {
492 regs[i].set1(VMRegImpl::stack2reg(stack++));
493 }
494 break;
495 case T_FLOAT:
496 if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
497 freg_arg0 = i;
498 regs[i].set1(xmm0->as_VMReg());
499 } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
500 freg_arg1 = i;
501 regs[i].set1(xmm1->as_VMReg());
502 } else {
503 regs[i].set1(VMRegImpl::stack2reg(stack++));
504 }
505 break;
506 case T_LONG:
507 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
508 regs[i].set2(VMRegImpl::stack2reg(dstack));
509 dstack += 2;
510 break;
511 case T_DOUBLE:
512 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
513 if( freg_arg0 == (uint)i ) {
514 regs[i].set2(xmm0->as_VMReg());
515 } else if( freg_arg1 == (uint)i ) {
516 regs[i].set2(xmm1->as_VMReg());
517 } else {
518 regs[i].set2(VMRegImpl::stack2reg(dstack));
519 dstack += 2;
520 }
521 break;
522 case T_VOID: regs[i].set_bad(); break;
523 break;
524 default:
525 ShouldNotReachHere();
526 break;
527 }
528 }
529
530 return stack;
531 }
532
533 // Patch the callers callsite with entry to compiled code if it exists.
534 static void patch_callers_callsite(MacroAssembler *masm) {
535 Label L;
536 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
537 __ jcc(Assembler::equal, L);
538 // Schedule the branch target address early.
539 // Call into the VM to patch the caller, then jump to compiled callee
540 // rax, isn't live so capture return address while we easily can
541 __ movptr(rax, Address(rsp, 0));
542 __ pusha();
543 __ pushf();
544
545 if (UseSSE == 1) {
546 __ subptr(rsp, 2*wordSize);
547 __ movflt(Address(rsp, 0), xmm0);
548 __ movflt(Address(rsp, wordSize), xmm1);
549 }
550 if (UseSSE >= 2) {
551 __ subptr(rsp, 4*wordSize);
552 __ movdbl(Address(rsp, 0), xmm0);
553 __ movdbl(Address(rsp, 2*wordSize), xmm1);
554 }
555 #ifdef COMPILER2
556 // C2 may leave the stack dirty if not in SSE2+ mode
557 if (UseSSE >= 2) {
558 __ verify_FPU(0, "c2i transition should have clean FPU stack");
559 } else {
560 __ empty_FPU_stack();
561 }
562 #endif /* COMPILER2 */
563
564 // VM needs caller's callsite
565 __ push(rax);
566 // VM needs target method
567 __ push(rbx);
568 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
569 __ addptr(rsp, 2*wordSize);
570
571 if (UseSSE == 1) {
572 __ movflt(xmm0, Address(rsp, 0));
573 __ movflt(xmm1, Address(rsp, wordSize));
574 __ addptr(rsp, 2*wordSize);
575 }
576 if (UseSSE >= 2) {
577 __ movdbl(xmm0, Address(rsp, 0));
578 __ movdbl(xmm1, Address(rsp, 2*wordSize));
579 __ addptr(rsp, 4*wordSize);
580 }
581
582 __ popf();
583 __ popa();
584 __ bind(L);
585 }
586
587
588 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
589 int next_off = st_off - Interpreter::stackElementSize;
590 __ movdbl(Address(rsp, next_off), r);
591 }
592
593 static void gen_c2i_adapter(MacroAssembler *masm,
594 int total_args_passed,
595 int comp_args_on_stack,
596 const BasicType *sig_bt,
597 const VMRegPair *regs,
598 Label& skip_fixup) {
599 // Before we get into the guts of the C2I adapter, see if we should be here
600 // at all. We've come from compiled code and are attempting to jump to the
601 // interpreter, which means the caller made a static call to get here
602 // (vcalls always get a compiled target if there is one). Check for a
603 // compiled target. If there is one, we need to patch the caller's call.
604 patch_callers_callsite(masm);
605
606 __ bind(skip_fixup);
607
608 #ifdef COMPILER2
609 // C2 may leave the stack dirty if not in SSE2+ mode
610 if (UseSSE >= 2) {
611 __ verify_FPU(0, "c2i transition should have clean FPU stack");
612 } else {
613 __ empty_FPU_stack();
614 }
615 #endif /* COMPILER2 */
616
617 // Since all args are passed on the stack, total_args_passed * interpreter_
618 // stack_element_size is the
619 // space we need.
620 int extraspace = total_args_passed * Interpreter::stackElementSize;
621
622 // Get return address
623 __ pop(rax);
624
625 // set senderSP value
626 __ movptr(rsi, rsp);
627
628 __ subptr(rsp, extraspace);
629
630 // Now write the args into the outgoing interpreter space
631 for (int i = 0; i < total_args_passed; i++) {
632 if (sig_bt[i] == T_VOID) {
633 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
634 continue;
635 }
636
637 // st_off points to lowest address on stack.
638 int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
639 int next_off = st_off - Interpreter::stackElementSize;
640
641 // Say 4 args:
642 // i st_off
643 // 0 12 T_LONG
644 // 1 8 T_VOID
645 // 2 4 T_OBJECT
646 // 3 0 T_BOOL
647 VMReg r_1 = regs[i].first();
648 VMReg r_2 = regs[i].second();
649 if (!r_1->is_valid()) {
650 assert(!r_2->is_valid(), "");
651 continue;
652 }
653
654 if (r_1->is_stack()) {
655 // memory to memory use fpu stack top
656 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
657
658 if (!r_2->is_valid()) {
659 __ movl(rdi, Address(rsp, ld_off));
660 __ movptr(Address(rsp, st_off), rdi);
661 } else {
662
663 // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
664 // st_off == MSW, st_off-wordSize == LSW
665
666 __ movptr(rdi, Address(rsp, ld_off));
667 __ movptr(Address(rsp, next_off), rdi);
668 __ movptr(rdi, Address(rsp, ld_off + wordSize));
669 __ movptr(Address(rsp, st_off), rdi);
670 }
671 } else if (r_1->is_Register()) {
672 Register r = r_1->as_Register();
673 if (!r_2->is_valid()) {
674 __ movl(Address(rsp, st_off), r);
675 } else {
676 // long/double in gpr
677 ShouldNotReachHere();
678 }
679 } else {
680 assert(r_1->is_XMMRegister(), "");
681 if (!r_2->is_valid()) {
682 __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
683 } else {
684 assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
685 move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
686 }
687 }
688 }
689
690 // Schedule the branch target address early.
691 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
692 // And repush original return address
693 __ push(rax);
694 __ jmp(rcx);
695 }
696
697
698 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
699 int next_val_off = ld_off - Interpreter::stackElementSize;
700 __ movdbl(r, Address(saved_sp, next_val_off));
701 }
702
703 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
704 address code_start, address code_end,
705 Label& L_ok) {
706 Label L_fail;
707 __ lea(temp_reg, ExternalAddress(code_start));
708 __ cmpptr(pc_reg, temp_reg);
709 __ jcc(Assembler::belowEqual, L_fail);
710 __ lea(temp_reg, ExternalAddress(code_end));
711 __ cmpptr(pc_reg, temp_reg);
712 __ jcc(Assembler::below, L_ok);
713 __ bind(L_fail);
714 }
715
716 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
717 int total_args_passed,
718 int comp_args_on_stack,
719 const BasicType *sig_bt,
720 const VMRegPair *regs) {
721 // Note: rsi contains the senderSP on entry. We must preserve it since
722 // we may do a i2c -> c2i transition if we lose a race where compiled
723 // code goes non-entrant while we get args ready.
724
725 // Adapters can be frameless because they do not require the caller
726 // to perform additional cleanup work, such as correcting the stack pointer.
727 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
728 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
729 // even if a callee has modified the stack pointer.
730 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
731 // routinely repairs its caller's stack pointer (from sender_sp, which is set
732 // up via the senderSP register).
733 // In other words, if *either* the caller or callee is interpreted, we can
734 // get the stack pointer repaired after a call.
735 // This is why c2i and i2c adapters cannot be indefinitely composed.
736 // In particular, if a c2i adapter were to somehow call an i2c adapter,
737 // both caller and callee would be compiled methods, and neither would
738 // clean up the stack pointer changes performed by the two adapters.
739 // If this happens, control eventually transfers back to the compiled
740 // caller, but with an uncorrected stack, causing delayed havoc.
741
742 // Pick up the return address
743 __ movptr(rax, Address(rsp, 0));
744
745 if (VerifyAdapterCalls &&
746 (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
747 // So, let's test for cascading c2i/i2c adapters right now.
748 // assert(Interpreter::contains($return_addr) ||
749 // StubRoutines::contains($return_addr),
750 // "i2c adapter must return to an interpreter frame");
751 __ block_comment("verify_i2c { ");
752 Label L_ok;
753 if (Interpreter::code() != nullptr) {
754 range_check(masm, rax, rdi,
755 Interpreter::code()->code_start(), Interpreter::code()->code_end(),
756 L_ok);
757 }
758 if (StubRoutines::initial_stubs_code() != nullptr) {
759 range_check(masm, rax, rdi,
760 StubRoutines::initial_stubs_code()->code_begin(),
761 StubRoutines::initial_stubs_code()->code_end(),
762 L_ok);
763 }
764 if (StubRoutines::final_stubs_code() != nullptr) {
765 range_check(masm, rax, rdi,
766 StubRoutines::final_stubs_code()->code_begin(),
767 StubRoutines::final_stubs_code()->code_end(),
768 L_ok);
769 }
770 const char* msg = "i2c adapter must return to an interpreter frame";
771 __ block_comment(msg);
772 __ stop(msg);
773 __ bind(L_ok);
774 __ block_comment("} verify_i2ce ");
775 }
776
777 // Must preserve original SP for loading incoming arguments because
778 // we need to align the outgoing SP for compiled code.
779 __ movptr(rdi, rsp);
780
781 // Cut-out for having no stack args. Since up to 2 int/oop args are passed
782 // in registers, we will occasionally have no stack args.
783 int comp_words_on_stack = 0;
784 if (comp_args_on_stack) {
785 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
786 // registers are below. By subtracting stack0, we either get a negative
787 // number (all values in registers) or the maximum stack slot accessed.
788 // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
789 // Convert 4-byte stack slots to words.
790 comp_words_on_stack = align_up(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
791 // Round up to miminum stack alignment, in wordSize
792 comp_words_on_stack = align_up(comp_words_on_stack, 2);
793 __ subptr(rsp, comp_words_on_stack * wordSize);
794 }
795
796 // Align the outgoing SP
797 __ andptr(rsp, -(StackAlignmentInBytes));
798
799 // push the return address on the stack (note that pushing, rather
800 // than storing it, yields the correct frame alignment for the callee)
801 __ push(rax);
802
803 // Put saved SP in another register
804 const Register saved_sp = rax;
805 __ movptr(saved_sp, rdi);
806
807
808 // Will jump to the compiled code just as if compiled code was doing it.
809 // Pre-load the register-jump target early, to schedule it better.
810 __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
811
812 // Now generate the shuffle code. Pick up all register args and move the
813 // rest through the floating point stack top.
814 for (int i = 0; i < total_args_passed; i++) {
815 if (sig_bt[i] == T_VOID) {
816 // Longs and doubles are passed in native word order, but misaligned
817 // in the 32-bit build.
818 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
819 continue;
820 }
821
822 // Pick up 0, 1 or 2 words from SP+offset.
823
824 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
825 "scrambled load targets?");
826 // Load in argument order going down.
827 int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
828 // Point to interpreter value (vs. tag)
829 int next_off = ld_off - Interpreter::stackElementSize;
830 //
831 //
832 //
833 VMReg r_1 = regs[i].first();
834 VMReg r_2 = regs[i].second();
835 if (!r_1->is_valid()) {
836 assert(!r_2->is_valid(), "");
837 continue;
838 }
839 if (r_1->is_stack()) {
840 // Convert stack slot to an SP offset (+ wordSize to account for return address )
841 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
842
843 // We can use rsi as a temp here because compiled code doesn't need rsi as an input
844 // and if we end up going thru a c2i because of a miss a reasonable value of rsi
845 // we be generated.
846 if (!r_2->is_valid()) {
847 // __ fld_s(Address(saved_sp, ld_off));
848 // __ fstp_s(Address(rsp, st_off));
849 __ movl(rsi, Address(saved_sp, ld_off));
850 __ movptr(Address(rsp, st_off), rsi);
851 } else {
852 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
853 // are accessed as negative so LSW is at LOW address
854
855 // ld_off is MSW so get LSW
856 // st_off is LSW (i.e. reg.first())
857 // __ fld_d(Address(saved_sp, next_off));
858 // __ fstp_d(Address(rsp, st_off));
859 //
860 // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
861 // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
862 // So we must adjust where to pick up the data to match the interpreter.
863 //
864 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
865 // are accessed as negative so LSW is at LOW address
866
867 // ld_off is MSW so get LSW
868 __ movptr(rsi, Address(saved_sp, next_off));
869 __ movptr(Address(rsp, st_off), rsi);
870 __ movptr(rsi, Address(saved_sp, ld_off));
871 __ movptr(Address(rsp, st_off + wordSize), rsi);
872 }
873 } else if (r_1->is_Register()) { // Register argument
874 Register r = r_1->as_Register();
875 assert(r != rax, "must be different");
876 if (r_2->is_valid()) {
877 //
878 // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
879 // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
880 // So we must adjust where to pick up the data to match the interpreter.
881
882 // this can be a misaligned move
883 __ movptr(r, Address(saved_sp, next_off));
884 assert(r_2->as_Register() != rax, "need another temporary register");
885 // Remember r_1 is low address (and LSB on x86)
886 // So r_2 gets loaded from high address regardless of the platform
887 __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
888 } else {
889 __ movl(r, Address(saved_sp, ld_off));
890 }
891 } else {
892 assert(r_1->is_XMMRegister(), "");
893 if (!r_2->is_valid()) {
894 __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
895 } else {
896 move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
897 }
898 }
899 }
900
901 // 6243940 We might end up in handle_wrong_method if
902 // the callee is deoptimized as we race thru here. If that
903 // happens we don't want to take a safepoint because the
904 // caller frame will look interpreted and arguments are now
905 // "compiled" so it is much better to make this transition
906 // invisible to the stack walking code. Unfortunately if
907 // we try and find the callee by normal means a safepoint
908 // is possible. So we stash the desired callee in the thread
909 // and the vm will find there should this case occur.
910
911 __ get_thread(rax);
912 __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
913
914 // move Method* to rax, in case we end up in an c2i adapter.
915 // the c2i adapters expect Method* in rax, (c2) because c2's
916 // resolve stubs return the result (the method) in rax,.
917 // I'd love to fix this.
918 __ mov(rax, rbx);
919
920 __ jmp(rdi);
921 }
922
923 // ---------------------------------------------------------------
924 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
925 int total_args_passed,
926 int comp_args_on_stack,
927 const BasicType *sig_bt,
928 const VMRegPair *regs,
929 AdapterFingerPrint* fingerprint) {
930 address i2c_entry = __ pc();
931
932 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
933
934 // -------------------------------------------------------------------------
935 // Generate a C2I adapter. On entry we know rbx, holds the Method* during calls
936 // to the interpreter. The args start out packed in the compiled layout. They
937 // need to be unpacked into the interpreter layout. This will almost always
938 // require some stack space. We grow the current (compiled) stack, then repack
939 // the args. We finally end in a jump to the generic interpreter entry point.
940 // On exit from the interpreter, the interpreter will restore our SP (lest the
941 // compiled code, which relies solely on SP and not EBP, get sick).
942
943 address c2i_unverified_entry = __ pc();
944 Label skip_fixup;
945
946 Register data = rax;
947 Register receiver = rcx;
948 Register temp = rbx;
949
950 {
951 __ ic_check(1 /* end_alignment */);
952 __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
953 // Method might have been compiled since the call site was patched to
954 // interpreted if that is the case treat it as a miss so we can get
955 // the call site corrected.
956 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
957 __ jcc(Assembler::equal, skip_fixup);
958 }
959
960 address c2i_entry = __ pc();
961
962 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
963 bs->c2i_entry_barrier(masm);
964
965 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
966
967 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
968 }
969
970 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
971 VMRegPair *regs,
972 int total_args_passed) {
973
974 // We return the amount of VMRegImpl stack slots we need to reserve for all
975 // the arguments NOT counting out_preserve_stack_slots.
976
977 uint stack = 0; // All arguments on stack
978
979 for( int i = 0; i < total_args_passed; i++) {
980 // From the type and the argument number (count) compute the location
981 switch( sig_bt[i] ) {
982 case T_BOOLEAN:
983 case T_CHAR:
984 case T_FLOAT:
985 case T_BYTE:
986 case T_SHORT:
987 case T_INT:
988 case T_OBJECT:
989 case T_ARRAY:
990 case T_ADDRESS:
991 case T_METADATA:
992 regs[i].set1(VMRegImpl::stack2reg(stack++));
993 break;
994 case T_LONG:
995 case T_DOUBLE: // The stack numbering is reversed from Java
996 // Since C arguments do not get reversed, the ordering for
997 // doubles on the stack must be opposite the Java convention
998 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
999 regs[i].set2(VMRegImpl::stack2reg(stack));
1000 stack += 2;
1001 break;
1002 case T_VOID: regs[i].set_bad(); break;
1003 default:
1004 ShouldNotReachHere();
1005 break;
1006 }
1007 }
1008 return stack;
1009 }
1010
1011 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
1012 uint num_bits,
1013 uint total_args_passed) {
1014 Unimplemented();
1015 return 0;
1016 }
1017
1018 // A simple move of integer like type
1019 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1020 if (src.first()->is_stack()) {
1021 if (dst.first()->is_stack()) {
1022 // stack to stack
1023 // __ ld(FP, reg2offset(src.first()), L5);
1024 // __ st(L5, SP, reg2offset(dst.first()));
1025 __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
1026 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1027 } else {
1028 // stack to reg
1029 __ movl2ptr(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1030 }
1031 } else if (dst.first()->is_stack()) {
1032 // reg to stack
1033 // no need to sign extend on 64bit
1034 __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1035 } else {
1036 if (dst.first() != src.first()) {
1037 __ mov(dst.first()->as_Register(), src.first()->as_Register());
1038 }
1039 }
1040 }
1041
1042 // An oop arg. Must pass a handle not the oop itself
1043 static void object_move(MacroAssembler* masm,
1044 OopMap* map,
1045 int oop_handle_offset,
1046 int framesize_in_slots,
1047 VMRegPair src,
1048 VMRegPair dst,
1049 bool is_receiver,
1050 int* receiver_offset) {
1051
1052 // Because of the calling conventions we know that src can be a
1053 // register or a stack location. dst can only be a stack location.
1054
1055 assert(dst.first()->is_stack(), "must be stack");
1056 // must pass a handle. First figure out the location we use as a handle
1057
1058 if (src.first()->is_stack()) {
1059 // Oop is already on the stack as an argument
1060 Register rHandle = rax;
1061 Label nil;
1062 __ xorptr(rHandle, rHandle);
1063 __ cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
1064 __ jcc(Assembler::equal, nil);
1065 __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1066 __ bind(nil);
1067 __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1068
1069 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1070 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1071 if (is_receiver) {
1072 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1073 }
1074 } else {
1075 // Oop is in a register we must store it to the space we reserve
1076 // on the stack for oop_handles
1077 const Register rOop = src.first()->as_Register();
1078 const Register rHandle = rax;
1079 int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
1080 int offset = oop_slot*VMRegImpl::stack_slot_size;
1081 Label skip;
1082 __ movptr(Address(rsp, offset), rOop);
1083 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1084 __ xorptr(rHandle, rHandle);
1085 __ cmpptr(rOop, NULL_WORD);
1086 __ jcc(Assembler::equal, skip);
1087 __ lea(rHandle, Address(rsp, offset));
1088 __ bind(skip);
1089 // Store the handle parameter
1090 __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1091 if (is_receiver) {
1092 *receiver_offset = offset;
1093 }
1094 }
1095 }
1096
1097 // A float arg may have to do float reg int reg conversion
1098 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1099 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1100
1101 // Because of the calling convention we know that src is either a stack location
1102 // or an xmm register. dst can only be a stack location.
1103
1104 assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");
1105
1106 if (src.first()->is_stack()) {
1107 __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1108 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1109 } else {
1110 // reg to stack
1111 __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1112 }
1113 }
1114
1115 // A long move
1116 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1117
1118 // The only legal possibility for a long_move VMRegPair is:
1119 // 1: two stack slots (possibly unaligned)
1120 // as neither the java or C calling convention will use registers
1121 // for longs.
1122
1123 if (src.first()->is_stack() && dst.first()->is_stack()) {
1124 assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
1125 __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1126 __ movptr(rbx, Address(rbp, reg2offset_in(src.second())));
1127 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1128 __ movptr(Address(rsp, reg2offset_out(dst.second())), rbx);
1129 } else {
1130 ShouldNotReachHere();
1131 }
1132 }
1133
1134 // A double move
1135 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1136
1137 // The only legal possibilities for a double_move VMRegPair are:
1138 // The painful thing here is that like long_move a VMRegPair might be
1139
1140 // Because of the calling convention we know that src is either
1141 // 1: a single physical register (xmm registers only)
1142 // 2: two stack slots (possibly unaligned)
1143 // dst can only be a pair of stack slots.
1144
1145 assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");
1146
1147 if (src.first()->is_stack()) {
1148 // source is all stack
1149 __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1150 __ movptr(rbx, Address(rbp, reg2offset_in(src.second())));
1151 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1152 __ movptr(Address(rsp, reg2offset_out(dst.second())), rbx);
1153 } else {
1154 // reg to stack
1155 // No worries about stack alignment
1156 __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1157 }
1158 }
1159
1160
1161 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1162 // We always ignore the frame_slots arg and just use the space just below frame pointer
1163 // which by this time is free to use
1164 switch (ret_type) {
1165 case T_FLOAT:
1166 __ fstp_s(Address(rbp, -wordSize));
1167 break;
1168 case T_DOUBLE:
1169 __ fstp_d(Address(rbp, -2*wordSize));
1170 break;
1171 case T_VOID: break;
1172 case T_LONG:
1173 __ movptr(Address(rbp, -wordSize), rax);
1174 __ movptr(Address(rbp, -2*wordSize), rdx);
1175 break;
1176 default: {
1177 __ movptr(Address(rbp, -wordSize), rax);
1178 }
1179 }
1180 }
1181
1182 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1183 // We always ignore the frame_slots arg and just use the space just below frame pointer
1184 // which by this time is free to use
1185 switch (ret_type) {
1186 case T_FLOAT:
1187 __ fld_s(Address(rbp, -wordSize));
1188 break;
1189 case T_DOUBLE:
1190 __ fld_d(Address(rbp, -2*wordSize));
1191 break;
1192 case T_LONG:
1193 __ movptr(rax, Address(rbp, -wordSize));
1194 __ movptr(rdx, Address(rbp, -2*wordSize));
1195 break;
1196 case T_VOID: break;
1197 default: {
1198 __ movptr(rax, Address(rbp, -wordSize));
1199 }
1200 }
1201 }
1202
1203 static void verify_oop_args(MacroAssembler* masm,
1204 const methodHandle& method,
1205 const BasicType* sig_bt,
1206 const VMRegPair* regs) {
1207 Register temp_reg = rbx; // not part of any compiled calling seq
1208 if (VerifyOops) {
1209 for (int i = 0; i < method->size_of_parameters(); i++) {
1210 if (is_reference_type(sig_bt[i])) {
1211 VMReg r = regs[i].first();
1212 assert(r->is_valid(), "bad oop arg");
1213 if (r->is_stack()) {
1214 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1215 __ verify_oop(temp_reg);
1216 } else {
1217 __ verify_oop(r->as_Register());
1218 }
1219 }
1220 }
1221 }
1222 }
1223
1224 static void gen_special_dispatch(MacroAssembler* masm,
1225 const methodHandle& method,
1226 const BasicType* sig_bt,
1227 const VMRegPair* regs) {
1228 verify_oop_args(masm, method, sig_bt, regs);
1229 vmIntrinsics::ID iid = method->intrinsic_id();
1230
1231 // Now write the args into the outgoing interpreter space
1232 bool has_receiver = false;
1233 Register receiver_reg = noreg;
1234 int member_arg_pos = -1;
1235 Register member_reg = noreg;
1236 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1237 if (ref_kind != 0) {
1238 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1239 member_reg = rbx; // known to be free at this point
1240 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1241 } else if (iid == vmIntrinsics::_invokeBasic) {
1242 has_receiver = true;
1243 } else {
1244 fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1245 }
1246
1247 if (member_reg != noreg) {
1248 // Load the member_arg into register, if necessary.
1249 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1250 VMReg r = regs[member_arg_pos].first();
1251 if (r->is_stack()) {
1252 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1253 } else {
1254 // no data motion is needed
1255 member_reg = r->as_Register();
1256 }
1257 }
1258
1259 if (has_receiver) {
1260 // Make sure the receiver is loaded into a register.
1261 assert(method->size_of_parameters() > 0, "oob");
1262 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1263 VMReg r = regs[0].first();
1264 assert(r->is_valid(), "bad receiver arg");
1265 if (r->is_stack()) {
1266 // Porting note: This assumes that compiled calling conventions always
1267 // pass the receiver oop in a register. If this is not true on some
1268 // platform, pick a temp and load the receiver from stack.
1269 fatal("receiver always in a register");
1270 receiver_reg = rcx; // known to be free at this point
1271 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1272 } else {
1273 // no data motion is needed
1274 receiver_reg = r->as_Register();
1275 }
1276 }
1277
1278 // Figure out which address we are really jumping to:
1279 MethodHandles::generate_method_handle_dispatch(masm, iid,
1280 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1281 }
1282
1283 // ---------------------------------------------------------------------------
1284 // Generate a native wrapper for a given method. The method takes arguments
1285 // in the Java compiled code convention, marshals them to the native
1286 // convention (handlizes oops, etc), transitions to native, makes the call,
1287 // returns to java state (possibly blocking), unhandlizes any result and
1288 // returns.
1289 //
1290 // Critical native functions are a shorthand for the use of
1291 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1292 // functions. The wrapper is expected to unpack the arguments before
1293 // passing them to the callee. Critical native functions leave the state _in_Java,
1294 // since they cannot stop for GC.
1295 // Some other parts of JNI setup are skipped like the tear down of the JNI handle
1296 // block and the check for pending exceptions it's impossible for them
1297 // to be thrown.
1298 //
1299 //
1300 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1301 const methodHandle& method,
1302 int compile_id,
1303 BasicType* in_sig_bt,
1304 VMRegPair* in_regs,
1305 BasicType ret_type) {
1306 if (method->is_method_handle_intrinsic()) {
1307 vmIntrinsics::ID iid = method->intrinsic_id();
1308 intptr_t start = (intptr_t)__ pc();
1309 int vep_offset = ((intptr_t)__ pc()) - start;
1310 gen_special_dispatch(masm,
1311 method,
1312 in_sig_bt,
1313 in_regs);
1314 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1315 __ flush();
1316 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1317 return nmethod::new_native_nmethod(method,
1318 compile_id,
1319 masm->code(),
1320 vep_offset,
1321 frame_complete,
1322 stack_slots / VMRegImpl::slots_per_word,
1323 in_ByteSize(-1),
1324 in_ByteSize(-1),
1325 (OopMapSet*)nullptr);
1326 }
1327 address native_func = method->native_function();
1328 assert(native_func != nullptr, "must have function");
1329
1330 // An OopMap for lock (and class if static)
1331 OopMapSet *oop_maps = new OopMapSet();
1332
1333 // We have received a description of where all the java arg are located
1334 // on entry to the wrapper. We need to convert these args to where
1335 // the jni function will expect them. To figure out where they go
1336 // we convert the java signature to a C signature by inserting
1337 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1338
1339 const int total_in_args = method->size_of_parameters();
1340 int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1341
1342 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1343 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1344 BasicType* in_elem_bt = nullptr;
1345
1346 int argc = 0;
1347 out_sig_bt[argc++] = T_ADDRESS;
1348 if (method->is_static()) {
1349 out_sig_bt[argc++] = T_OBJECT;
1350 }
1351
1352 for (int i = 0; i < total_in_args ; i++ ) {
1353 out_sig_bt[argc++] = in_sig_bt[i];
1354 }
1355
1356 // Now figure out where the args must be stored and how much stack space
1357 // they require.
1358 int out_arg_slots;
1359 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1360
1361 // Compute framesize for the wrapper. We need to handlize all oops in
1362 // registers a max of 2 on x86.
1363
1364 // Calculate the total number of stack slots we will need.
1365
1366 // First count the abi requirement plus all of the outgoing args
1367 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1368
1369 // Now the space for the inbound oop handle area
1370 int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1371
1372 int oop_handle_offset = stack_slots;
1373 stack_slots += total_save_slots;
1374
1375 // Now any space we need for handlizing a klass if static method
1376
1377 int klass_slot_offset = 0;
1378 int klass_offset = -1;
1379 int lock_slot_offset = 0;
1380 bool is_static = false;
1381
1382 if (method->is_static()) {
1383 klass_slot_offset = stack_slots;
1384 stack_slots += VMRegImpl::slots_per_word;
1385 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1386 is_static = true;
1387 }
1388
1389 // Plus a lock if needed
1390
1391 if (method->is_synchronized()) {
1392 lock_slot_offset = stack_slots;
1393 stack_slots += VMRegImpl::slots_per_word;
1394 }
1395
1396 // Now a place (+2) to save return values or temp during shuffling
1397 // + 2 for return address (which we own) and saved rbp,
1398 stack_slots += 4;
1399
1400 // Ok The space we have allocated will look like:
1401 //
1402 //
1403 // FP-> | |
1404 // |---------------------|
1405 // | 2 slots for moves |
1406 // |---------------------|
1407 // | lock box (if sync) |
1408 // |---------------------| <- lock_slot_offset (-lock_slot_rbp_offset)
1409 // | klass (if static) |
1410 // |---------------------| <- klass_slot_offset
1411 // | oopHandle area |
1412 // |---------------------| <- oop_handle_offset (a max of 2 registers)
1413 // | outbound memory |
1414 // | based arguments |
1415 // | |
1416 // |---------------------|
1417 // | |
1418 // SP-> | out_preserved_slots |
1419 //
1420 //
1421 // ****************************************************************************
1422 // WARNING - on Windows Java Natives use pascal calling convention and pop the
1423 // arguments off of the stack after the jni call. Before the call we can use
1424 // instructions that are SP relative. After the jni call we switch to FP
1425 // relative instructions instead of re-adjusting the stack on windows.
1426 // ****************************************************************************
1427
1428
1429 // Now compute actual number of stack words we need rounding to make
1430 // stack properly aligned.
1431 stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1432
1433 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1434
1435 intptr_t start = (intptr_t)__ pc();
1436
1437 // First thing make an ic check to see if we should even be here
1438
1439 // We are free to use all registers as temps without saving them and
1440 // restoring them except rbp. rbp is the only callee save register
1441 // as far as the interpreter and the compiler(s) are concerned.
1442
1443
1444 const Register receiver = rcx;
1445 Label exception_pending;
1446
1447 __ verify_oop(receiver);
1448 // verified entry must be aligned for code patching.
1449 __ ic_check(8 /* end_alignment */);
1450
1451 int vep_offset = ((intptr_t)__ pc()) - start;
1452
1453 #ifdef COMPILER1
1454 // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
1455 if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1456 inline_check_hashcode_from_object_header(masm, method, rcx /*obj_reg*/, rax /*result*/);
1457 }
1458 #endif // COMPILER1
1459
1460 // The instruction at the verified entry point must be 5 bytes or longer
1461 // because it can be patched on the fly by make_non_entrant. The stack bang
1462 // instruction fits that requirement.
1463
1464 // Generate stack overflow check
1465 __ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
1466
1467 // Generate a new frame for the wrapper.
1468 __ enter();
1469 // -2 because return address is already present and so is saved rbp
1470 __ subptr(rsp, stack_size - 2*wordSize);
1471
1472
1473 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1474 bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */);
1475
1476 // Frame is now completed as far as size and linkage.
1477 int frame_complete = ((intptr_t)__ pc()) - start;
1478
1479 // Calculate the difference between rsp and rbp,. We need to know it
1480 // after the native call because on windows Java Natives will pop
1481 // the arguments and it is painful to do rsp relative addressing
1482 // in a platform independent way. So after the call we switch to
1483 // rbp, relative addressing.
1484
1485 int fp_adjustment = stack_size - 2*wordSize;
1486
1487 #ifdef COMPILER2
1488 // C2 may leave the stack dirty if not in SSE2+ mode
1489 if (UseSSE >= 2) {
1490 __ verify_FPU(0, "c2i transition should have clean FPU stack");
1491 } else {
1492 __ empty_FPU_stack();
1493 }
1494 #endif /* COMPILER2 */
1495
1496 // Compute the rbp, offset for any slots used after the jni call
1497
1498 int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1499
1500 // We use rdi as a thread pointer because it is callee save and
1501 // if we load it once it is usable thru the entire wrapper
1502 const Register thread = rdi;
1503
1504 // We use rsi as the oop handle for the receiver/klass
1505 // It is callee save so it survives the call to native
1506
1507 const Register oop_handle_reg = rsi;
1508
1509 __ get_thread(thread);
1510
1511 //
1512 // We immediately shuffle the arguments so that any vm call we have to
1513 // make from here on out (sync slow path, jvmti, etc.) we will have
1514 // captured the oops from our caller and have a valid oopMap for
1515 // them.
1516
1517 // -----------------
1518 // The Grand Shuffle
1519 //
1520 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1521 // and, if static, the class mirror instead of a receiver. This pretty much
1522 // guarantees that register layout will not match (and x86 doesn't use reg
1523 // parms though amd does). Since the native abi doesn't use register args
1524 // and the java conventions does we don't have to worry about collisions.
1525 // All of our moved are reg->stack or stack->stack.
1526 // We ignore the extra arguments during the shuffle and handle them at the
1527 // last moment. The shuffle is described by the two calling convention
1528 // vectors we have in our possession. We simply walk the java vector to
1529 // get the source locations and the c vector to get the destinations.
1530
1531 int c_arg = method->is_static() ? 2 : 1;
1532
1533 // Record rsp-based slot for receiver on stack for non-static methods
1534 int receiver_offset = -1;
1535
1536 // This is a trick. We double the stack slots so we can claim
1537 // the oops in the caller's frame. Since we are sure to have
1538 // more args than the caller doubling is enough to make
1539 // sure we can capture all the incoming oop args from the
1540 // caller.
1541 //
1542 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1543
1544 // Mark location of rbp,
1545 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1546
1547 // We know that we only have args in at most two integer registers (rcx, rdx). So rax, rbx
1548 // Are free to temporaries if we have to do stack to steck moves.
1549 // All inbound args are referenced based on rbp, and all outbound args via rsp.
1550
1551 for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
1552 switch (in_sig_bt[i]) {
1553 case T_ARRAY:
1554 case T_OBJECT:
1555 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1556 ((i == 0) && (!is_static)),
1557 &receiver_offset);
1558 break;
1559 case T_VOID:
1560 break;
1561
1562 case T_FLOAT:
1563 float_move(masm, in_regs[i], out_regs[c_arg]);
1564 break;
1565
1566 case T_DOUBLE:
1567 assert( i + 1 < total_in_args &&
1568 in_sig_bt[i + 1] == T_VOID &&
1569 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1570 double_move(masm, in_regs[i], out_regs[c_arg]);
1571 break;
1572
1573 case T_LONG :
1574 long_move(masm, in_regs[i], out_regs[c_arg]);
1575 break;
1576
1577 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1578
1579 default:
1580 simple_move32(masm, in_regs[i], out_regs[c_arg]);
1581 }
1582 }
1583
1584 // Pre-load a static method's oop into rsi. Used both by locking code and
1585 // the normal JNI call code.
1586 if (method->is_static()) {
1587
1588 // load opp into a register
1589 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
1590
1591 // Now handlize the static class mirror it's known not-null.
1592 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1593 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1594
1595 // Now get the handle
1596 __ lea(oop_handle_reg, Address(rsp, klass_offset));
1597 // store the klass handle as second argument
1598 __ movptr(Address(rsp, wordSize), oop_handle_reg);
1599 }
1600
1601 // Change state to native (we save the return address in the thread, since it might not
1602 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1603 // points into the right code segment. It does not have to be the correct return pc.
1604 // We use the same pc/oopMap repeatedly when we call out
1605
1606 intptr_t the_pc = (intptr_t) __ pc();
1607 oop_maps->add_gc_map(the_pc - start, map);
1608
1609 __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc, noreg);
1610
1611
1612 // We have all of the arguments setup at this point. We must not touch any register
1613 // argument registers at this point (what if we save/restore them there are no oop?
1614
1615 {
1616 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
1617 __ mov_metadata(rax, method());
1618 __ call_VM_leaf(
1619 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1620 thread, rax);
1621 }
1622
1623 // RedefineClasses() tracing support for obsolete method entry
1624 if (log_is_enabled(Trace, redefine, class, obsolete)) {
1625 __ mov_metadata(rax, method());
1626 __ call_VM_leaf(
1627 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1628 thread, rax);
1629 }
1630
1631 // These are register definitions we need for locking/unlocking
1632 const Register swap_reg = rax; // Must use rax, for cmpxchg instruction
1633 const Register obj_reg = rcx; // Will contain the oop
1634 const Register lock_reg = rdx; // Address of compiler lock object (BasicLock)
1635
1636 Label slow_path_lock;
1637 Label lock_done;
1638
1639 // Lock a synchronized method
1640 if (method->is_synchronized()) {
1641 Label count_mon;
1642
1643 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1644
1645 // Get the handle (the 2nd argument)
1646 __ movptr(oop_handle_reg, Address(rsp, wordSize));
1647
1648 // Get address of the box
1649
1650 __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1651
1652 // Load the oop from the handle
1653 __ movptr(obj_reg, Address(oop_handle_reg, 0));
1654
1655 if (LockingMode == LM_MONITOR) {
1656 __ jmp(slow_path_lock);
1657 } else if (LockingMode == LM_LEGACY) {
1658 // Load immediate 1 into swap_reg %rax,
1659 __ movptr(swap_reg, 1);
1660
1661 // Load (object->mark() | 1) into swap_reg %rax,
1662 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1663
1664 // Save (object->mark() | 1) into BasicLock's displaced header
1665 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1666
1667 // src -> dest iff dest == rax, else rax, <- dest
1668 // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
1669 __ lock();
1670 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1671 __ jcc(Assembler::equal, count_mon);
1672
1673 // Test if the oopMark is an obvious stack pointer, i.e.,
1674 // 1) (mark & 3) == 0, and
1675 // 2) rsp <= mark < mark + os::pagesize()
1676 // These 3 tests can be done by evaluating the following
1677 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1678 // assuming both stack pointer and pagesize have their
1679 // least significant 2 bits clear.
1680 // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
1681
1682 __ subptr(swap_reg, rsp);
1683 __ andptr(swap_reg, 3 - (int)os::vm_page_size());
1684
1685 // Save the test result, for recursive case, the result is zero
1686 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1687 __ jcc(Assembler::notEqual, slow_path_lock);
1688 } else {
1689 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1690 __ lightweight_lock(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
1691 }
1692 __ bind(count_mon);
1693 __ inc_held_monitor_count();
1694
1695 // Slow path will re-enter here
1696 __ bind(lock_done);
1697 }
1698
1699
1700 // Finally just about ready to make the JNI call
1701
1702 // get JNIEnv* which is first argument to native
1703 __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1704 __ movptr(Address(rsp, 0), rdx);
1705
1706 // Now set thread in native
1707 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1708
1709 __ call(RuntimeAddress(native_func));
1710
1711 // Verify or restore cpu control state after JNI call
1712 __ restore_cpu_control_state_after_jni(noreg);
1713
1714 // WARNING - on Windows Java Natives use pascal calling convention and pop the
1715 // arguments off of the stack. We could just re-adjust the stack pointer here
1716 // and continue to do SP relative addressing but we instead switch to FP
1717 // relative addressing.
1718
1719 // Unpack native results.
1720 switch (ret_type) {
1721 case T_BOOLEAN: __ c2bool(rax); break;
1722 case T_CHAR : __ andptr(rax, 0xFFFF); break;
1723 case T_BYTE : __ sign_extend_byte (rax); break;
1724 case T_SHORT : __ sign_extend_short(rax); break;
1725 case T_INT : /* nothing to do */ break;
1726 case T_DOUBLE :
1727 case T_FLOAT :
1728 // Result is in st0 we'll save as needed
1729 break;
1730 case T_ARRAY: // Really a handle
1731 case T_OBJECT: // Really a handle
1732 break; // can't de-handlize until after safepoint check
1733 case T_VOID: break;
1734 case T_LONG: break;
1735 default : ShouldNotReachHere();
1736 }
1737
1738 Label after_transition;
1739
1740 // Switch thread to "native transition" state before reading the synchronization state.
1741 // This additional state is necessary because reading and testing the synchronization
1742 // state is not atomic w.r.t. GC, as this scenario demonstrates:
1743 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1744 // VM thread changes sync state to synchronizing and suspends threads for GC.
1745 // Thread A is resumed to finish this native method, but doesn't block here since it
1746 // didn't see any synchronization is progress, and escapes.
1747 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1748
1749 // Force this write out before the read below
1750 if (!UseSystemMemoryBarrier) {
1751 __ membar(Assembler::Membar_mask_bits(
1752 Assembler::LoadLoad | Assembler::LoadStore |
1753 Assembler::StoreLoad | Assembler::StoreStore));
1754 }
1755
1756 if (AlwaysRestoreFPU) {
1757 // Make sure the control word is correct.
1758 __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
1759 }
1760
1761 // check for safepoint operation in progress and/or pending suspend requests
1762 { Label Continue, slow_path;
1763
1764 __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);
1765
1766 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1767 __ jcc(Assembler::equal, Continue);
1768 __ bind(slow_path);
1769
1770 // Don't use call_VM as it will see a possible pending exception and forward it
1771 // and never return here preventing us from clearing _last_native_pc down below.
1772 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
1773 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
1774 // by hand.
1775 //
1776 __ vzeroupper();
1777
1778 save_native_result(masm, ret_type, stack_slots);
1779 __ push(thread);
1780 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1781 JavaThread::check_special_condition_for_native_trans)));
1782 __ increment(rsp, wordSize);
1783 // Restore any method result value
1784 restore_native_result(masm, ret_type, stack_slots);
1785 __ bind(Continue);
1786 }
1787
1788 // change thread state
1789 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1790 __ bind(after_transition);
1791
1792 Label reguard;
1793 Label reguard_done;
1794 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
1795 __ jcc(Assembler::equal, reguard);
1796
1797 // slow path reguard re-enters here
1798 __ bind(reguard_done);
1799
1800 // Handle possible exception (will unlock if necessary)
1801
1802 // native result if any is live
1803
1804 // Unlock
1805 Label slow_path_unlock;
1806 Label unlock_done;
1807 if (method->is_synchronized()) {
1808
1809 Label fast_done;
1810
1811 // Get locked oop from the handle we passed to jni
1812 __ movptr(obj_reg, Address(oop_handle_reg, 0));
1813
1814 if (LockingMode == LM_LEGACY) {
1815 Label not_recur;
1816 // Simple recursive lock?
1817 __ cmpptr(Address(rbp, lock_slot_rbp_offset), NULL_WORD);
1818 __ jcc(Assembler::notEqual, not_recur);
1819 __ dec_held_monitor_count();
1820 __ jmpb(fast_done);
1821 __ bind(not_recur);
1822 }
1823
1824 // Must save rax, if it is live now because cmpxchg must use it
1825 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1826 save_native_result(masm, ret_type, stack_slots);
1827 }
1828
1829 if (LockingMode == LM_MONITOR) {
1830 __ jmp(slow_path_unlock);
1831 } else if (LockingMode == LM_LEGACY) {
1832 // get old displaced header
1833 __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
1834
1835 // get address of the stack lock
1836 __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1837
1838 // Atomic swap old header if oop still contains the stack lock
1839 // src -> dest iff dest == rax, else rax, <- dest
1840 // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
1841 __ lock();
1842 __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1843 __ jcc(Assembler::notEqual, slow_path_unlock);
1844 __ dec_held_monitor_count();
1845 } else {
1846 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1847 __ lightweight_unlock(obj_reg, swap_reg, thread, lock_reg, slow_path_unlock);
1848 __ dec_held_monitor_count();
1849 }
1850
1851 // slow path re-enters here
1852 __ bind(unlock_done);
1853 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1854 restore_native_result(masm, ret_type, stack_slots);
1855 }
1856
1857 __ bind(fast_done);
1858 }
1859
1860 {
1861 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
1862 // Tell dtrace about this method exit
1863 save_native_result(masm, ret_type, stack_slots);
1864 __ mov_metadata(rax, method());
1865 __ call_VM_leaf(
1866 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1867 thread, rax);
1868 restore_native_result(masm, ret_type, stack_slots);
1869 }
1870
1871 // We can finally stop using that last_Java_frame we setup ages ago
1872
1873 __ reset_last_Java_frame(thread, false);
1874
1875 // Unbox oop result, e.g. JNIHandles::resolve value.
1876 if (is_reference_type(ret_type)) {
1877 __ resolve_jobject(rax /* value */,
1878 thread /* thread */,
1879 rcx /* tmp */);
1880 }
1881
1882 if (CheckJNICalls) {
1883 // clear_pending_jni_exception_check
1884 __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
1885 }
1886
1887 // reset handle block
1888 __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
1889 __ movl(Address(rcx, JNIHandleBlock::top_offset()), NULL_WORD);
1890
1891 // Any exception pending?
1892 __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1893 __ jcc(Assembler::notEqual, exception_pending);
1894
1895 // no exception, we're almost done
1896
1897 // check that only result value is on FPU stack
1898 __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
1899
1900 // Fixup floating pointer results so that result looks like a return from a compiled method
1901 if (ret_type == T_FLOAT) {
1902 if (UseSSE >= 1) {
1903 // Pop st0 and store as float and reload into xmm register
1904 __ fstp_s(Address(rbp, -4));
1905 __ movflt(xmm0, Address(rbp, -4));
1906 }
1907 } else if (ret_type == T_DOUBLE) {
1908 if (UseSSE >= 2) {
1909 // Pop st0 and store as double and reload into xmm register
1910 __ fstp_d(Address(rbp, -8));
1911 __ movdbl(xmm0, Address(rbp, -8));
1912 }
1913 }
1914
1915 // Return
1916
1917 __ leave();
1918 __ ret(0);
1919
1920 // Unexpected paths are out of line and go here
1921
1922 // Slow path locking & unlocking
1923 if (method->is_synchronized()) {
1924
1925 // BEGIN Slow path lock
1926
1927 __ bind(slow_path_lock);
1928
1929 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
1930 // args are (oop obj, BasicLock* lock, JavaThread* thread)
1931 __ push(thread);
1932 __ push(lock_reg);
1933 __ push(obj_reg);
1934 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
1935 __ addptr(rsp, 3*wordSize);
1936
1937 #ifdef ASSERT
1938 { Label L;
1939 __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1940 __ jcc(Assembler::equal, L);
1941 __ stop("no pending exception allowed on exit from monitorenter");
1942 __ bind(L);
1943 }
1944 #endif
1945 __ jmp(lock_done);
1946
1947 // END Slow path lock
1948
1949 // BEGIN Slow path unlock
1950 __ bind(slow_path_unlock);
1951 __ vzeroupper();
1952 // Slow path unlock
1953
1954 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1955 save_native_result(masm, ret_type, stack_slots);
1956 }
1957 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1958
1959 __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1960 __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1961
1962
1963 // should be a peal
1964 // +wordSize because of the push above
1965 // args are (oop obj, BasicLock* lock, JavaThread* thread)
1966 __ push(thread);
1967 __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1968 __ push(rax);
1969
1970 __ push(obj_reg);
1971 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
1972 __ addptr(rsp, 3*wordSize);
1973 #ifdef ASSERT
1974 {
1975 Label L;
1976 __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1977 __ jcc(Assembler::equal, L);
1978 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
1979 __ bind(L);
1980 }
1981 #endif /* ASSERT */
1982
1983 __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1984
1985 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1986 restore_native_result(masm, ret_type, stack_slots);
1987 }
1988 __ jmp(unlock_done);
1989 // END Slow path unlock
1990
1991 }
1992
1993 // SLOW PATH Reguard the stack if needed
1994
1995 __ bind(reguard);
1996 __ vzeroupper();
1997 save_native_result(masm, ret_type, stack_slots);
1998 {
1999 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2000 }
2001 restore_native_result(masm, ret_type, stack_slots);
2002 __ jmp(reguard_done);
2003
2004
2005 // BEGIN EXCEPTION PROCESSING
2006
2007 // Forward the exception
2008 __ bind(exception_pending);
2009
2010 // remove possible return value from FPU register stack
2011 __ empty_FPU_stack();
2012
2013 // pop our frame
2014 __ leave();
2015 // and forward the exception
2016 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2017
2018 __ flush();
2019
2020 nmethod *nm = nmethod::new_native_nmethod(method,
2021 compile_id,
2022 masm->code(),
2023 vep_offset,
2024 frame_complete,
2025 stack_slots / VMRegImpl::slots_per_word,
2026 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2027 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2028 oop_maps);
2029
2030 return nm;
2031
2032 }
2033
2034 // this function returns the adjust size (in number of words) to a c2i adapter
2035 // activation for use during deoptimization
2036 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2037 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2038 }
2039
2040
2041 // Number of stack slots between incoming argument block and the start of
2042 // a new frame. The PROLOG must add this many slots to the stack. The
2043 // EPILOG must remove this many slots. Intel needs one slot for
2044 // return address and one for rbp, (must save rbp)
2045 uint SharedRuntime::in_preserve_stack_slots() {
2046 return 2+VerifyStackAtCalls;
2047 }
2048
2049 uint SharedRuntime::out_preserve_stack_slots() {
2050 return 0;
2051 }
2052
2053 //------------------------------generate_deopt_blob----------------------------
2054 void SharedRuntime::generate_deopt_blob() {
2055 // allocate space for the code
2056 ResourceMark rm;
2057 // setup code generation tools
2058 // note: the buffer code size must account for StackShadowPages=50
2059 CodeBuffer buffer("deopt_blob", 1536, 1024);
2060 MacroAssembler* masm = new MacroAssembler(&buffer);
2061 int frame_size_in_words;
2062 OopMap* map = nullptr;
2063 // Account for the extra args we place on the stack
2064 // by the time we call fetch_unroll_info
2065 const int additional_words = 2; // deopt kind, thread
2066
2067 OopMapSet *oop_maps = new OopMapSet();
2068
2069 // -------------
2070 // This code enters when returning to a de-optimized nmethod. A return
2071 // address has been pushed on the stack, and return values are in
2072 // registers.
2073 // If we are doing a normal deopt then we were called from the patched
2074 // nmethod from the point we returned to the nmethod. So the return
2075 // address on the stack is wrong by NativeCall::instruction_size
2076 // We will adjust the value to it looks like we have the original return
2077 // address on the stack (like when we eagerly deoptimized).
2078 // In the case of an exception pending with deoptimized then we enter
2079 // with a return address on the stack that points after the call we patched
2080 // into the exception handler. We have the following register state:
2081 // rax,: exception
2082 // rbx,: exception handler
2083 // rdx: throwing pc
2084 // So in this case we simply jam rdx into the useless return address and
2085 // the stack looks just like we want.
2086 //
2087 // At this point we need to de-opt. We save the argument return
2088 // registers. We call the first C routine, fetch_unroll_info(). This
2089 // routine captures the return values and returns a structure which
2090 // describes the current frame size and the sizes of all replacement frames.
2091 // The current frame is compiled code and may contain many inlined
2092 // functions, each with their own JVM state. We pop the current frame, then
2093 // push all the new frames. Then we call the C routine unpack_frames() to
2094 // populate these frames. Finally unpack_frames() returns us the new target
2095 // address. Notice that callee-save registers are BLOWN here; they have
2096 // already been captured in the vframeArray at the time the return PC was
2097 // patched.
2098 address start = __ pc();
2099 Label cont;
2100
2101 // Prolog for non exception case!
2102
2103 // Save everything in sight.
2104
2105 map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2106 // Normal deoptimization
2107 __ push(Deoptimization::Unpack_deopt);
2108 __ jmp(cont);
2109
2110 int reexecute_offset = __ pc() - start;
2111
2112 // Reexecute case
2113 // return address is the pc describes what bci to do re-execute at
2114
2115 // No need to update map as each call to save_live_registers will produce identical oopmap
2116 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2117
2118 __ push(Deoptimization::Unpack_reexecute);
2119 __ jmp(cont);
2120
2121 int exception_offset = __ pc() - start;
2122
2123 // Prolog for exception case
2124
2125 // all registers are dead at this entry point, except for rax, and
2126 // rdx which contain the exception oop and exception pc
2127 // respectively. Set them in TLS and fall thru to the
2128 // unpack_with_exception_in_tls entry point.
2129
2130 __ get_thread(rdi);
2131 __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2132 __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2133
2134 int exception_in_tls_offset = __ pc() - start;
2135
2136 // new implementation because exception oop is now passed in JavaThread
2137
2138 // Prolog for exception case
2139 // All registers must be preserved because they might be used by LinearScan
2140 // Exceptiop oop and throwing PC are passed in JavaThread
2141 // tos: stack at point of call to method that threw the exception (i.e. only
2142 // args are on the stack, no return address)
2143
2144 // make room on stack for the return address
2145 // It will be patched later with the throwing pc. The correct value is not
2146 // available now because loading it from memory would destroy registers.
2147 __ push(0);
2148
2149 // Save everything in sight.
2150
2151 // No need to update map as each call to save_live_registers will produce identical oopmap
2152 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2153
2154 // Now it is safe to overwrite any register
2155
2156 // store the correct deoptimization type
2157 __ push(Deoptimization::Unpack_exception);
2158
2159 // load throwing pc from JavaThread and patch it as the return address
2160 // of the current frame. Then clear the field in JavaThread
2161 __ get_thread(rdi);
2162 __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2163 __ movptr(Address(rbp, wordSize), rdx);
2164 __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2165
2166 #ifdef ASSERT
2167 // verify that there is really an exception oop in JavaThread
2168 __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2169 __ verify_oop(rax);
2170
2171 // verify that there is no pending exception
2172 Label no_pending_exception;
2173 __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2174 __ testptr(rax, rax);
2175 __ jcc(Assembler::zero, no_pending_exception);
2176 __ stop("must not have pending exception here");
2177 __ bind(no_pending_exception);
2178 #endif
2179
2180 __ bind(cont);
2181
2182 // Compiled code leaves the floating point stack dirty, empty it.
2183 __ empty_FPU_stack();
2184
2185
2186 // Call C code. Need thread and this frame, but NOT official VM entry
2187 // crud. We cannot block on this call, no GC can happen.
2188 __ get_thread(rcx);
2189 __ push(rcx);
2190 // fetch_unroll_info needs to call last_java_frame()
2191 __ set_last_Java_frame(rcx, noreg, noreg, nullptr, noreg);
2192
2193 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2194
2195 // Need to have an oopmap that tells fetch_unroll_info where to
2196 // find any register it might need.
2197
2198 oop_maps->add_gc_map( __ pc()-start, map);
2199
2200 // Discard args to fetch_unroll_info
2201 __ pop(rcx);
2202 __ pop(rcx);
2203
2204 __ get_thread(rcx);
2205 __ reset_last_Java_frame(rcx, false);
2206
2207 // Load UnrollBlock into EDI
2208 __ mov(rdi, rax);
2209
2210 // Move the unpack kind to a safe place in the UnrollBlock because
2211 // we are very short of registers
2212
2213 Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset());
2214 // retrieve the deopt kind from the UnrollBlock.
2215 __ movl(rax, unpack_kind);
2216
2217 Label noException;
2218 __ cmpl(rax, Deoptimization::Unpack_exception); // Was exception pending?
2219 __ jcc(Assembler::notEqual, noException);
2220 __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2221 __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2222 __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2223 __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2224
2225 __ verify_oop(rax);
2226
2227 // Overwrite the result registers with the exception results.
2228 __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2229 __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2230
2231 __ bind(noException);
2232
2233 // Stack is back to only having register save data on the stack.
2234 // Now restore the result registers. Everything else is either dead or captured
2235 // in the vframeArray.
2236
2237 RegisterSaver::restore_result_registers(masm);
2238
2239 // Non standard control word may be leaked out through a safepoint blob, and we can
2240 // deopt at a poll point with the non standard control word. However, we should make
2241 // sure the control word is correct after restore_result_registers.
2242 __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
2243
2244 // All of the register save area has been popped of the stack. Only the
2245 // return address remains.
2246
2247 // Pop all the frames we must move/replace.
2248 //
2249 // Frame picture (youngest to oldest)
2250 // 1: self-frame (no frame link)
2251 // 2: deopting frame (no frame link)
2252 // 3: caller of deopting frame (could be compiled/interpreted).
2253 //
2254 // Note: by leaving the return address of self-frame on the stack
2255 // and using the size of frame 2 to adjust the stack
2256 // when we are done the return to frame 3 will still be on the stack.
2257
2258 // Pop deoptimized frame
2259 __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2260
2261 // sp should be pointing at the return address to the caller (3)
2262
2263 // Pick up the initial fp we should save
2264 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2265 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
2266
2267 #ifdef ASSERT
2268 // Compilers generate code that bang the stack by as much as the
2269 // interpreter would need. So this stack banging should never
2270 // trigger a fault. Verify that it does not on non product builds.
2271 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2272 __ bang_stack_size(rbx, rcx);
2273 #endif
2274
2275 // Load array of frame pcs into ECX
2276 __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset()));
2277
2278 __ pop(rsi); // trash the old pc
2279
2280 // Load array of frame sizes into ESI
2281 __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset()));
2282
2283 Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset());
2284
2285 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset()));
2286 __ movl(counter, rbx);
2287
2288 // Now adjust the caller's stack to make up for the extra locals
2289 // but record the original sp so that we can save it in the skeletal interpreter
2290 // frame and the stack walking of interpreter_sender will get the unextended sp
2291 // value and not the "real" sp value.
2292
2293 Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset());
2294 __ movptr(sp_temp, rsp);
2295 __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset()));
2296 __ subptr(rsp, rbx);
2297
2298 // Push interpreter frames in a loop
2299 Label loop;
2300 __ bind(loop);
2301 __ movptr(rbx, Address(rsi, 0)); // Load frame size
2302 __ subptr(rbx, 2*wordSize); // we'll push pc and rbp, by hand
2303 __ pushptr(Address(rcx, 0)); // save return address
2304 __ enter(); // save old & set new rbp,
2305 __ subptr(rsp, rbx); // Prolog!
2306 __ movptr(rbx, sp_temp); // sender's sp
2307 // This value is corrected by layout_activation_impl
2308 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
2309 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2310 __ movptr(sp_temp, rsp); // pass to next frame
2311 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
2312 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
2313 __ decrementl(counter); // decrement counter
2314 __ jcc(Assembler::notZero, loop);
2315 __ pushptr(Address(rcx, 0)); // save final return address
2316
2317 // Re-push self-frame
2318 __ enter(); // save old & set new rbp,
2319
2320 // Return address and rbp, are in place
2321 // We'll push additional args later. Just allocate a full sized
2322 // register save area
2323 __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
2324
2325 // Restore frame locals after moving the frame
2326 __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2327 __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2328 __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize)); // Pop float stack and store in local
2329 if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2330 if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2331
2332 // Set up the args to unpack_frame
2333
2334 __ pushl(unpack_kind); // get the unpack_kind value
2335 __ get_thread(rcx);
2336 __ push(rcx);
2337
2338 // set last_Java_sp, last_Java_fp
2339 __ set_last_Java_frame(rcx, noreg, rbp, nullptr, noreg);
2340
2341 // Call C code. Need thread but NOT official VM entry
2342 // crud. We cannot block on this call, no GC can happen. Call should
2343 // restore return values to their stack-slots with the new SP.
2344 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2345 // Set an oopmap for the call site
2346 oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
2347
2348 // rax, contains the return result type
2349 __ push(rax);
2350
2351 __ get_thread(rcx);
2352 __ reset_last_Java_frame(rcx, false);
2353
2354 // Collect return values
2355 __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
2356 __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
2357
2358 // Clear floating point stack before returning to interpreter
2359 __ empty_FPU_stack();
2360
2361 // Check if we should push the float or double return value.
2362 Label results_done, yes_double_value;
2363 __ cmpl(Address(rsp, 0), T_DOUBLE);
2364 __ jcc (Assembler::zero, yes_double_value);
2365 __ cmpl(Address(rsp, 0), T_FLOAT);
2366 __ jcc (Assembler::notZero, results_done);
2367
2368 // return float value as expected by interpreter
2369 if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2370 else __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2371 __ jmp(results_done);
2372
2373 // return double value as expected by interpreter
2374 __ bind(yes_double_value);
2375 if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2376 else __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2377
2378 __ bind(results_done);
2379
2380 // Pop self-frame.
2381 __ leave(); // Epilog!
2382
2383 // Jump to interpreter
2384 __ ret(0);
2385
2386 // -------------
2387 // make sure all code is generated
2388 masm->flush();
2389
2390 _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2391 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2392 }
2393
2394
2395 #ifdef COMPILER2
2396 //------------------------------generate_uncommon_trap_blob--------------------
2397 void SharedRuntime::generate_uncommon_trap_blob() {
2398 // allocate space for the code
2399 ResourceMark rm;
2400 // setup code generation tools
2401 CodeBuffer buffer("uncommon_trap_blob", 512, 512);
2402 MacroAssembler* masm = new MacroAssembler(&buffer);
2403
2404 enum frame_layout {
2405 arg0_off, // thread sp + 0 // Arg location for
2406 arg1_off, // unloaded_class_index sp + 1 // calling C
2407 arg2_off, // exec_mode sp + 2
2408 // The frame sender code expects that rbp will be in the "natural" place and
2409 // will override any oopMap setting for it. We must therefore force the layout
2410 // so that it agrees with the frame sender code.
2411 rbp_off, // callee saved register sp + 3
2412 return_off, // slot for return address sp + 4
2413 framesize
2414 };
2415
2416 address start = __ pc();
2417
2418 // Push self-frame.
2419 __ subptr(rsp, return_off*wordSize); // Epilog!
2420
2421 // rbp, is an implicitly saved callee saved register (i.e. the calling
2422 // convention will save restore it in prolog/epilog) Other than that
2423 // there are no callee save registers no that adapter frames are gone.
2424 __ movptr(Address(rsp, rbp_off*wordSize), rbp);
2425
2426 // Clear the floating point exception stack
2427 __ empty_FPU_stack();
2428
2429 // set last_Java_sp
2430 __ get_thread(rdx);
2431 __ set_last_Java_frame(rdx, noreg, noreg, nullptr, noreg);
2432
2433 // Call C code. Need thread but NOT official VM entry
2434 // crud. We cannot block on this call, no GC can happen. Call should
2435 // capture callee-saved registers as well as return values.
2436 __ movptr(Address(rsp, arg0_off*wordSize), rdx);
2437 // argument already in ECX
2438 __ movl(Address(rsp, arg1_off*wordSize),rcx);
2439 __ movl(Address(rsp, arg2_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2440 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2441
2442 // Set an oopmap for the call site
2443 OopMapSet *oop_maps = new OopMapSet();
2444 OopMap* map = new OopMap( framesize, 0 );
2445 // No oopMap for rbp, it is known implicitly
2446
2447 oop_maps->add_gc_map( __ pc()-start, map);
2448
2449 __ get_thread(rcx);
2450
2451 __ reset_last_Java_frame(rcx, false);
2452
2453 // Load UnrollBlock into EDI
2454 __ movptr(rdi, rax);
2455
2456 #ifdef ASSERT
2457 { Label L;
2458 __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset()),
2459 (int32_t)Deoptimization::Unpack_uncommon_trap);
2460 __ jcc(Assembler::equal, L);
2461 __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2462 __ bind(L);
2463 }
2464 #endif
2465
2466 // Pop all the frames we must move/replace.
2467 //
2468 // Frame picture (youngest to oldest)
2469 // 1: self-frame (no frame link)
2470 // 2: deopting frame (no frame link)
2471 // 3: caller of deopting frame (could be compiled/interpreted).
2472
2473 // Pop self-frame. We have no frame, and must rely only on EAX and ESP.
2474 __ addptr(rsp,(framesize-1)*wordSize); // Epilog!
2475
2476 // Pop deoptimized frame
2477 __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2478 __ addptr(rsp, rcx);
2479
2480 // sp should be pointing at the return address to the caller (3)
2481
2482 // Pick up the initial fp we should save
2483 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2484 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
2485
2486 #ifdef ASSERT
2487 // Compilers generate code that bang the stack by as much as the
2488 // interpreter would need. So this stack banging should never
2489 // trigger a fault. Verify that it does not on non product builds.
2490 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2491 __ bang_stack_size(rbx, rcx);
2492 #endif
2493
2494 // Load array of frame pcs into ECX
2495 __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset()));
2496
2497 __ pop(rsi); // trash the pc
2498
2499 // Load array of frame sizes into ESI
2500 __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset()));
2501
2502 Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset());
2503
2504 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset()));
2505 __ movl(counter, rbx);
2506
2507 // Now adjust the caller's stack to make up for the extra locals
2508 // but record the original sp so that we can save it in the skeletal interpreter
2509 // frame and the stack walking of interpreter_sender will get the unextended sp
2510 // value and not the "real" sp value.
2511
2512 Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset());
2513 __ movptr(sp_temp, rsp);
2514 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset()));
2515 __ subptr(rsp, rbx);
2516
2517 // Push interpreter frames in a loop
2518 Label loop;
2519 __ bind(loop);
2520 __ movptr(rbx, Address(rsi, 0)); // Load frame size
2521 __ subptr(rbx, 2*wordSize); // we'll push pc and rbp, by hand
2522 __ pushptr(Address(rcx, 0)); // save return address
2523 __ enter(); // save old & set new rbp,
2524 __ subptr(rsp, rbx); // Prolog!
2525 __ movptr(rbx, sp_temp); // sender's sp
2526 // This value is corrected by layout_activation_impl
2527 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
2528 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2529 __ movptr(sp_temp, rsp); // pass to next frame
2530 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
2531 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
2532 __ decrementl(counter); // decrement counter
2533 __ jcc(Assembler::notZero, loop);
2534 __ pushptr(Address(rcx, 0)); // save final return address
2535
2536 // Re-push self-frame
2537 __ enter(); // save old & set new rbp,
2538 __ subptr(rsp, (framesize-2) * wordSize); // Prolog!
2539
2540
2541 // set last_Java_sp, last_Java_fp
2542 __ get_thread(rdi);
2543 __ set_last_Java_frame(rdi, noreg, rbp, nullptr, noreg);
2544
2545 // Call C code. Need thread but NOT official VM entry
2546 // crud. We cannot block on this call, no GC can happen. Call should
2547 // restore return values to their stack-slots with the new SP.
2548 __ movptr(Address(rsp,arg0_off*wordSize),rdi);
2549 __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2550 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2551 // Set an oopmap for the call site
2552 oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
2553
2554 __ get_thread(rdi);
2555 __ reset_last_Java_frame(rdi, true);
2556
2557 // Pop self-frame.
2558 __ leave(); // Epilog!
2559
2560 // Jump to interpreter
2561 __ ret(0);
2562
2563 // -------------
2564 // make sure all code is generated
2565 masm->flush();
2566
2567 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
2568 }
2569 #endif // COMPILER2
2570
2571 //------------------------------generate_handler_blob------
2572 //
2573 // Generate a special Compile2Runtime blob that saves all registers,
2574 // setup oopmap, and calls safepoint code to stop the compiled code for
2575 // a safepoint.
2576 //
2577 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2578
2579 // Account for thread arg in our frame
2580 const int additional_words = 1;
2581 int frame_size_in_words;
2582
2583 assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2584
2585 ResourceMark rm;
2586 OopMapSet *oop_maps = new OopMapSet();
2587 OopMap* map;
2588
2589 // allocate space for the code
2590 // setup code generation tools
2591 CodeBuffer buffer("handler_blob", 2048, 1024);
2592 MacroAssembler* masm = new MacroAssembler(&buffer);
2593
2594 const Register java_thread = rdi; // callee-saved for VC++
2595 address start = __ pc();
2596 address call_pc = nullptr;
2597 bool cause_return = (poll_type == POLL_AT_RETURN);
2598 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
2599
2600 // If cause_return is true we are at a poll_return and there is
2601 // the return address on the stack to the caller on the nmethod
2602 // that is safepoint. We can leave this return on the stack and
2603 // effectively complete the return and safepoint in the caller.
2604 // Otherwise we push space for a return address that the safepoint
2605 // handler will install later to make the stack walking sensible.
2606 if (!cause_return)
2607 __ push(rbx); // Make room for return address (or push it again)
2608
2609 map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);
2610
2611 // The following is basically a call_VM. However, we need the precise
2612 // address of the call in order to generate an oopmap. Hence, we do all the
2613 // work ourselves.
2614
2615 // Push thread argument and setup last_Java_sp
2616 __ get_thread(java_thread);
2617 __ push(java_thread);
2618 __ set_last_Java_frame(java_thread, noreg, noreg, nullptr, noreg);
2619
2620 // if this was not a poll_return then we need to correct the return address now.
2621 if (!cause_return) {
2622 // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
2623 // Additionally, rbx is a callee saved register and we can look at it later to determine
2624 // if someone changed the return address for us!
2625 __ movptr(rbx, Address(java_thread, JavaThread::saved_exception_pc_offset()));
2626 __ movptr(Address(rbp, wordSize), rbx);
2627 }
2628
2629 // do the call
2630 __ call(RuntimeAddress(call_ptr));
2631
2632 // Set an oopmap for the call site. This oopmap will map all
2633 // oop-registers and debug-info registers as callee-saved. This
2634 // will allow deoptimization at this safepoint to find all possible
2635 // debug-info recordings, as well as let GC find all oops.
2636
2637 oop_maps->add_gc_map( __ pc() - start, map);
2638
2639 // Discard arg
2640 __ pop(rcx);
2641
2642 Label noException;
2643
2644 // Clear last_Java_sp again
2645 __ get_thread(java_thread);
2646 __ reset_last_Java_frame(java_thread, false);
2647
2648 __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
2649 __ jcc(Assembler::equal, noException);
2650
2651 // Exception pending
2652 RegisterSaver::restore_live_registers(masm, save_vectors);
2653
2654 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2655
2656 __ bind(noException);
2657
2658 Label no_adjust, bail, not_special;
2659 if (!cause_return) {
2660 // If our stashed return pc was modified by the runtime we avoid touching it
2661 __ cmpptr(rbx, Address(rbp, wordSize));
2662 __ jccb(Assembler::notEqual, no_adjust);
2663
2664 // Skip over the poll instruction.
2665 // See NativeInstruction::is_safepoint_poll()
2666 // Possible encodings:
2667 // 85 00 test %eax,(%rax)
2668 // 85 01 test %eax,(%rcx)
2669 // 85 02 test %eax,(%rdx)
2670 // 85 03 test %eax,(%rbx)
2671 // 85 06 test %eax,(%rsi)
2672 // 85 07 test %eax,(%rdi)
2673 //
2674 // 85 04 24 test %eax,(%rsp)
2675 // 85 45 00 test %eax,0x0(%rbp)
2676
2677 #ifdef ASSERT
2678 __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
2679 #endif
2680 // rsp/rbp base encoding takes 3 bytes with the following register values:
2681 // rsp 0x04
2682 // rbp 0x05
2683 __ movzbl(rcx, Address(rbx, 1));
2684 __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
2685 __ subptr(rcx, 4); // looking for 0x00 .. 0x01
2686 __ cmpptr(rcx, 1);
2687 __ jcc(Assembler::above, not_special);
2688 __ addptr(rbx, 1);
2689 __ bind(not_special);
2690 #ifdef ASSERT
2691 // Verify the correct encoding of the poll we're about to skip.
2692 __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
2693 __ jcc(Assembler::notEqual, bail);
2694 // Mask out the modrm bits
2695 __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
2696 // rax encodes to 0, so if the bits are nonzero it's incorrect
2697 __ jcc(Assembler::notZero, bail);
2698 #endif
2699 // Adjust return pc forward to step over the safepoint poll instruction
2700 __ addptr(rbx, 2);
2701 __ movptr(Address(rbp, wordSize), rbx);
2702 }
2703
2704 __ bind(no_adjust);
2705 // Normal exit, register restoring and exit
2706 RegisterSaver::restore_live_registers(masm, save_vectors);
2707
2708 __ ret(0);
2709
2710 #ifdef ASSERT
2711 __ bind(bail);
2712 __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2713 #endif
2714
2715 // make sure all code is generated
2716 masm->flush();
2717
2718 // Fill-out other meta info
2719 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2720 }
2721
2722 //
2723 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
2724 //
2725 // Generate a stub that calls into vm to find out the proper destination
2726 // of a java call. All the argument registers are live at this point
2727 // but since this is generic code we don't know what they are and the caller
2728 // must do any gc of the args.
2729 //
2730 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
2731 assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2732
2733 // allocate space for the code
2734 ResourceMark rm;
2735
2736 CodeBuffer buffer(name, 1000, 512);
2737 MacroAssembler* masm = new MacroAssembler(&buffer);
2738
2739 int frame_size_words;
2740 enum frame_layout {
2741 thread_off,
2742 extra_words };
2743
2744 OopMapSet *oop_maps = new OopMapSet();
2745 OopMap* map = nullptr;
2746
2747 int start = __ offset();
2748
2749 map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);
2750
2751 int frame_complete = __ offset();
2752
2753 const Register thread = rdi;
2754 __ get_thread(rdi);
2755
2756 __ push(thread);
2757 __ set_last_Java_frame(thread, noreg, rbp, nullptr, noreg);
2758
2759 __ call(RuntimeAddress(destination));
2760
2761
2762 // Set an oopmap for the call site.
2763 // We need this not only for callee-saved registers, but also for volatile
2764 // registers that the compiler might be keeping live across a safepoint.
2765
2766 oop_maps->add_gc_map( __ offset() - start, map);
2767
2768 // rax, contains the address we are going to jump to assuming no exception got installed
2769
2770 __ addptr(rsp, wordSize);
2771
2772 // clear last_Java_sp
2773 __ reset_last_Java_frame(thread, true);
2774 // check for pending exceptions
2775 Label pending;
2776 __ cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
2777 __ jcc(Assembler::notEqual, pending);
2778
2779 // get the returned Method*
2780 __ get_vm_result_2(rbx, thread);
2781 __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
2782
2783 __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
2784
2785 RegisterSaver::restore_live_registers(masm);
2786
2787 // We are back to the original state on entry and ready to go.
2788
2789 __ jmp(rax);
2790
2791 // Pending exception after the safepoint
2792
2793 __ bind(pending);
2794
2795 RegisterSaver::restore_live_registers(masm);
2796
2797 // exception pending => remove activation and forward to exception handler
2798
2799 __ get_thread(thread);
2800 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
2801 __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
2802 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2803
2804 // -------------
2805 // make sure all code is generated
2806 masm->flush();
2807
2808 // return the blob
2809 // frame_size_words or bytes??
2810 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
2811 }
--- EOF ---