/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "frame_ppc.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/ad.hpp"
#include "opto/runtime.hpp"
#endif

#include <alloca.h>

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")


class RegisterSaver {
 // Used for saving volatile registers.
 public:

  // Support different return pc locations.
  enum ReturnPCLocation {
    return_pc_is_lr,
    return_pc_is_pre_saved,
    return_pc_is_thread_saved_exception_pc
  };

  static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                             int* out_frame_size_in_bytes,
                                                             bool generate_oop_map,
                                                             int return_pc_adjustment,
                                                             ReturnPCLocation return_pc_location,
                                                             bool save_vectors = false);
  static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                   int frame_size_in_bytes,
                                                   bool restore_ctr,
                                                   bool save_vectors = false);

  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
                                                     Register r_temp,
                                                     int frame_size,
                                                     int total_args,
                                                     const VMRegPair *regs, const VMRegPair *regs2 = NULL);
  static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
                                                        int frame_size,
                                                        int total_args,
                                                        const VMRegPair *regs, const VMRegPair *regs2 = NULL);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);

  // Constants and data structures:

  typedef enum {
    int_reg,
    float_reg,
    special_reg,
    vs_reg
  } RegisterType;

  typedef enum {
    reg_size      = 8,
    half_reg_size = reg_size / 2,
    vs_reg_size   = 16
  } RegisterConstants;

  typedef struct {
    RegisterType reg_type;
    int          reg_num;
    VMReg        vmreg;
  } LiveRegType;
};


#define RegisterSaver_LiveIntReg(regname) \
  { RegisterSaver::int_reg,     regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveFloatReg(regname) \
  { RegisterSaver::float_reg,   regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveSpecialReg(regname) \
  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveVSReg(regname) \
  { RegisterSaver::vs_reg,      regname->encoding(), regname->as_VMReg() }

static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
  // Live registers which get spilled to the stack. Register
  // positions in this array correspond directly to the stack layout.

  //
  // live special registers:
  //
  RegisterSaver_LiveSpecialReg(SR_CTR),
  //
  // live float registers:
  //
  RegisterSaver_LiveFloatReg( F0  ),
  RegisterSaver_LiveFloatReg( F1  ),
  RegisterSaver_LiveFloatReg( F2  ),
  RegisterSaver_LiveFloatReg( F3  ),
  RegisterSaver_LiveFloatReg( F4  ),
  RegisterSaver_LiveFloatReg( F5  ),
  RegisterSaver_LiveFloatReg( F6  ),
  RegisterSaver_LiveFloatReg( F7  ),
  RegisterSaver_LiveFloatReg( F8  ),
  RegisterSaver_LiveFloatReg( F9  ),
  RegisterSaver_LiveFloatReg( F10 ),
  RegisterSaver_LiveFloatReg( F11 ),
  RegisterSaver_LiveFloatReg( F12 ),
  RegisterSaver_LiveFloatReg( F13 ),
  RegisterSaver_LiveFloatReg( F14 ),
  RegisterSaver_LiveFloatReg( F15 ),
  RegisterSaver_LiveFloatReg( F16 ),
  RegisterSaver_LiveFloatReg( F17 ),
  RegisterSaver_LiveFloatReg( F18 ),
  RegisterSaver_LiveFloatReg( F19 ),
  RegisterSaver_LiveFloatReg( F20 ),
  RegisterSaver_LiveFloatReg( F21 ),
  RegisterSaver_LiveFloatReg( F22 ),
  RegisterSaver_LiveFloatReg( F23 ),
  RegisterSaver_LiveFloatReg( F24 ),
  RegisterSaver_LiveFloatReg( F25 ),
  RegisterSaver_LiveFloatReg( F26 ),
  RegisterSaver_LiveFloatReg( F27 ),
  RegisterSaver_LiveFloatReg( F28 ),
  RegisterSaver_LiveFloatReg( F29 ),
  RegisterSaver_LiveFloatReg( F30 ),
  RegisterSaver_LiveFloatReg( F31 ),
  //
  // live integer registers:
  //
  RegisterSaver_LiveIntReg( R0 ),
  //RegisterSaver_LiveIntReg( R1 ), // stack pointer
  RegisterSaver_LiveIntReg( R2 ),
  RegisterSaver_LiveIntReg( R3 ),
  RegisterSaver_LiveIntReg( R4 ),
  RegisterSaver_LiveIntReg( R5 ),
  RegisterSaver_LiveIntReg( R6 ),
  RegisterSaver_LiveIntReg( R7 ),
  RegisterSaver_LiveIntReg( R8 ),
  RegisterSaver_LiveIntReg( R9 ),
  RegisterSaver_LiveIntReg( R10 ),
  RegisterSaver_LiveIntReg( R11 ),
  RegisterSaver_LiveIntReg( R12 ),
  //RegisterSaver_LiveIntReg( R13 ), // system thread id
  RegisterSaver_LiveIntReg( R14 ),
  RegisterSaver_LiveIntReg( R15 ),
  RegisterSaver_LiveIntReg( R16 ),
  RegisterSaver_LiveIntReg( R17 ),
  RegisterSaver_LiveIntReg( R18 ),
  RegisterSaver_LiveIntReg( R19 ),
  RegisterSaver_LiveIntReg( R20 ),
  RegisterSaver_LiveIntReg( R21 ),
  RegisterSaver_LiveIntReg( R22 ),
  RegisterSaver_LiveIntReg( R23 ),
  RegisterSaver_LiveIntReg( R24 ),
  RegisterSaver_LiveIntReg( R25 ),
  RegisterSaver_LiveIntReg( R26 ),
  RegisterSaver_LiveIntReg( R27 ),
  RegisterSaver_LiveIntReg( R28 ),
  RegisterSaver_LiveIntReg( R29 ),
  RegisterSaver_LiveIntReg( R30 ),
  RegisterSaver_LiveIntReg( R31 ) // must be the last register (see save/restore functions below)
};

static const RegisterSaver::LiveRegType RegisterSaver_LiveVSRegs[] = {
  //
  // live vector scalar registers (optional, only these ones are used by C2):
  //
  RegisterSaver_LiveVSReg( VSR32 ),
  RegisterSaver_LiveVSReg( VSR33 ),
  RegisterSaver_LiveVSReg( VSR34 ),
  RegisterSaver_LiveVSReg( VSR35 ),
  RegisterSaver_LiveVSReg( VSR36 ),
  RegisterSaver_LiveVSReg( VSR37 ),
  RegisterSaver_LiveVSReg( VSR38 ),
  RegisterSaver_LiveVSReg( VSR39 ),
  RegisterSaver_LiveVSReg( VSR40 ),
  RegisterSaver_LiveVSReg( VSR41 ),
  RegisterSaver_LiveVSReg( VSR42 ),
  RegisterSaver_LiveVSReg( VSR43 ),
  RegisterSaver_LiveVSReg( VSR44 ),
  RegisterSaver_LiveVSReg( VSR45 ),
  RegisterSaver_LiveVSReg( VSR46 ),
  RegisterSaver_LiveVSReg( VSR47 ),
  RegisterSaver_LiveVSReg( VSR48 ),
  RegisterSaver_LiveVSReg( VSR49 ),
  RegisterSaver_LiveVSReg( VSR50 ),
  RegisterSaver_LiveVSReg( VSR51 )
};


OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                                   int* out_frame_size_in_bytes,
                                                                   bool generate_oop_map,
                                                                   int return_pc_adjustment,
                                                                   ReturnPCLocation return_pc_location,
                                                                   bool save_vectors) {
  // Push an abi_reg_args-frame and store all registers which may be live.
  // If requested, create an OopMap: Record volatile registers as
  // callee-save values in an OopMap so their save locations will be
  // propagated to the RegisterMap of the caller frame during
  // StackFrameStream construction (needed for deoptimization; see
  // compiledVFrame::create_stack_value).
  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
  // Updated return pc is returned in R31 (if not return_pc_is_pre_saved).

  // calculate frame size
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int vsregstosave_num     = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) /
                                                   sizeof(RegisterSaver::LiveRegType))
                                                : 0;
  const int register_save_size  = regstosave_num * reg_size + vsregstosave_num * vs_reg_size;
  const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes)
                                  + frame::abi_reg_args_size;

  *out_frame_size_in_bytes       = frame_size_in_bytes;
  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;

  BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");

  // push a new frame
  __ push_frame(frame_size_in_bytes, noreg);

  // Save some registers in the last (non-vector) slots of the new frame so we
  // can use them as scratch regs or to determine the return pc.
  __ std(R31, frame_size_in_bytes -   reg_size - vsregstosave_num * vs_reg_size, R1_SP);
  __ std(R30, frame_size_in_bytes - 2*reg_size - vsregstosave_num * vs_reg_size, R1_SP);

  // save the flags
  // Do the save_LR_CR by hand and adjust the return pc if requested.
  __ mfcr(R30);
  __ std(R30, frame_size_in_bytes + _abi0(cr), R1_SP);
  switch (return_pc_location) {
    case return_pc_is_lr:        __ mflr(R31); break;
    case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break;
    case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
    default: ShouldNotReachHere();
  }
  if (return_pc_location != return_pc_is_pre_saved) {
    if (return_pc_adjustment != 0) {
      __ addi(R31, R31, return_pc_adjustment);
    }
    __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
  }

  // save all registers (ints and floats)
  int offset = register_save_offset;

  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num < 30) { // We spilled R30-31 right at the beginning.
          __ std(as_Register(reg_num), offset, R1_SP);
        }
        break;
      }
      case RegisterSaver::float_reg: {
        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          __ mfctr(R30);
          __ std(R30, offset, R1_SP);
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
                            RegisterSaver_LiveRegs[i].vmreg);
      map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
                            RegisterSaver_LiveRegs[i].vmreg->next());
    }
    offset += reg_size;
  }

  for (int i = 0; i < vsregstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveVSRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveVSRegs[i].reg_type;

    __ li(R30, offset);
    __ stxvd2x(as_VectorSRegister(reg_num), R30, R1_SP);

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
                            RegisterSaver_LiveVSRegs[i].vmreg);
    }
    offset += vs_reg_size;
  }

  assert(offset == frame_size_in_bytes, "consistency check");

  BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");

  // And we're done.
  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                         int frame_size_in_bytes,
                                                         bool restore_ctr,
                                                         bool save_vectors) {
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int vsregstosave_num     = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) /
                                                   sizeof(RegisterSaver::LiveRegType))
                                                : 0;
  const int register_save_size   = regstosave_num * reg_size + vsregstosave_num * vs_reg_size;

  const int register_save_offset = frame_size_in_bytes - register_save_size;

  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");

  // restore all registers (ints and floats)
  int offset = register_save_offset;

  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
            __ ld(R31, offset, R1_SP);
            __ mtctr(R31);
          }
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  for (int i = 0; i < vsregstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveVSRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveVSRegs[i].reg_type;

    __ li(R31, offset);
    __ lxvd2x(as_VectorSRegister(reg_num), R31, R1_SP);

    offset += vs_reg_size;
  }

  assert(offset == frame_size_in_bytes, "consistency check");

  // restore link and the flags
  __ ld(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
  __ mtlr(R31);

  __ ld(R31, frame_size_in_bytes + _abi0(cr), R1_SP);
  __ mtcr(R31);

  // restore scratch register's value
  __ ld(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP);

  // pop the frame
  __ addi(R1_SP, R1_SP, frame_size_in_bytes);

  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}

void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
                                                           int frame_size, int total_args, const VMRegPair *regs,
                                                           const VMRegPair *regs2) {
  __ push_frame(frame_size, r_temp);
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ std(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ stfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (!r_1->is_valid()) {
        assert(!r_2->is_valid(), "");
        continue;
      }
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
}

void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
                                                             int total_args, const VMRegPair *regs,
                                                             const VMRegPair *regs2) {
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ ld(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ lfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL)
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ ld(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ lfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  __ pop_frame();
}

// Restore the registers that might be holding a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size; // VS registers not relevant here.
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // restore all result registers (ints and floats)
  int offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (as_Register(reg_num) == R3_RET) // int result_reg
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        if (as_FloatRegister(reg_num) == F1_RET) // float result_reg
          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        // Special registers don't hold a result.
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  assert(offset == frame_size_in_bytes, "consistency check");
}

// Is the vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8/16 on PPC64.
  assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
  return size > 8;
}

static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}

static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
// values (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
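// Illustrative sketch (hypothetical signature, for exposition only): a call
// with the Java argument list (int, long, float, double) is assigned registers
// by java_calling_convention() below from the two independent pools declared
// next, roughly:
//   arg0 int    -> R3 (java_iarg_reg[0])
//   arg1 long   -> R4 (java_iarg_reg[1])
//   arg2 float  -> F1 (java_farg_reg[0])
//   arg3 double -> F2 (java_farg_reg[1])
// Arguments beyond 8 ints/longs/oops or 13 floats/doubles are placed in
// 32-bit stack slots, with longs, doubles and oops aligned to 2 slots.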

const VMReg java_iarg_reg[8] = {
  R3->as_VMReg(),
  R4->as_VMReg(),
  R5->as_VMReg(),
  R6->as_VMReg(),
  R7->as_VMReg(),
  R8->as_VMReg(),
  R9->as_VMReg(),
  R10->as_VMReg()
};

const VMReg java_farg_reg[13] = {
  F1->as_VMReg(),
  F2->as_VMReg(),
  F3->as_VMReg(),
  F4->as_VMReg(),
  F5->as_VMReg(),
  F6->as_VMReg(),
  F7->as_VMReg(),
  F8->as_VMReg(),
  F9->as_VMReg(),
  F10->as_VMReg(),
  F11->as_VMReg(),
  F12->as_VMReg(),
  F13->as_VMReg()
};

const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {
  // C2c calling conventions for compiled-compiled calls.
  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
  // registers _AND_ put the rest on the stack.

  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  int stk = 0;
  int ireg = 0;
  int freg = 0;

  // We put the first 8 arguments into registers and the rest on the
  // stack. Float arguments are already in their argument registers
  // due to c2c calling conventions (see calling_convention).
  for (int i = 0; i < total_args_passed; ++i) {
    switch(sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (ireg < num_java_iarg_registers) {
        // Put int/ptr in register
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put int/ptr on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (ireg < num_java_iarg_registers) {
        // Put long in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put long on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (ireg < num_java_iarg_registers) {
        // Put ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put ptr on stack. Objects must be aligned to 2 slots too,
        // because "64-bit pointers record oop-ishness on 2 aligned
        // adjacent registers." (see OopFlow::build_oop_map).
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_FLOAT:
      if (freg < num_java_farg_registers) {
        // Put float in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < num_java_farg_registers) {
        // Put double in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
    }
  }
  return align_up(stk, 2);
}

#if defined(COMPILER1) || defined(COMPILER2)
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  // Calling conventions for C runtime calls and calls to JNI native methods.
  //
  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
  // the first 13 flt/dbl's in the first 13 fp regs but additionally
  // copy flt/dbl to the stack if they are beyond the 8th argument.

  const VMReg iarg_reg[8] = {
    R3->as_VMReg(),
    R4->as_VMReg(),
    R5->as_VMReg(),
    R6->as_VMReg(),
    R7->as_VMReg(),
    R8->as_VMReg(),
    R9->as_VMReg(),
    R10->as_VMReg()
  };

  const VMReg farg_reg[13] = {
    F1->as_VMReg(),
    F2->as_VMReg(),
    F3->as_VMReg(),
    F4->as_VMReg(),
    F5->as_VMReg(),
    F6->as_VMReg(),
    F7->as_VMReg(),
    F8->as_VMReg(),
    F9->as_VMReg(),
    F10->as_VMReg(),
    F11->as_VMReg(),
    F12->as_VMReg(),
    F13->as_VMReg()
  };

  // Check calling conventions consistency.
  assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
         sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
         "consistency");

  // `Stk' counts stack slots. Due to alignment, 32 bit values occupy
  // 2 such slots, like 64 bit values do.
  const int inc_stk_for_intfloat   = 2; // 2 slots for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  // Leave room for C-compatible ABI_REG_ARGS.
  int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
  int arg = 0;
  int freg = 0;

  // Avoid passing C arguments in the wrong stack slots.
#if defined(ABI_ELFv2)
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96,
         "passing C arguments in wrong stack slots");
#else
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
         "passing C arguments in wrong stack slots");
#endif
  // We fill-out regs AND regs2 if an argument must be passed in a
  // register AND in a stack slot. If regs2 is NULL in such a
  // situation, we bail-out with a fatal error.
  for (int i = 0; i < total_args_passed; ++i, ++arg) {
    // Initialize regs2 to BAD.
    if (regs2 != NULL) regs2[i].set_bad();

    switch(sig_bt[i]) {

    //
    // If arguments 0-7 are integers, they are passed in integer registers.
    // Argument i is placed in iarg_reg[i].
    //
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      // We must cast ints to longs and use full 64 bit stack slots
      // here. Thus fall through, handle as long.
    case T_LONG:
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      // Oops are already boxed if required (JNI).
      if (arg < Argument::n_int_register_parameters_c) {
        reg = iarg_reg[arg];
      } else {
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    //
    // Floats are treated differently from int regs: The first 13 float arguments
    // are passed in registers (not the float args among the first 13 args).
    // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
    // in farg_reg[j] if argument i is the j-th float argument of this call.
    //
    case T_FLOAT:
#if defined(LINUX)
      // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
      // in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#define FLOAT_WORD_OFFSET_IN_SLOT 1
#endif
#elif defined(AIX)
      // Although AIX runs on big endian CPU, float is in the most
      // significant word of an argument slot.
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#error "unknown OS"
#endif
      if (freg < Argument::n_float_register_parameters_c) {
        // Put float in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a float arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
          regs2[i].set1(reg2);
          stk += inc_stk_for_intfloat;
        }

      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < Argument::n_float_register_parameters_c) {
        // Put double in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a double arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk);
          regs2[i].set2(reg2);
          stk += inc_stk_for_longdouble;
        }
      } else {
        // Put double on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  return align_up(stk, 2);
}
#endif // COMPILER1 || COMPILER2

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  Unimplemented();
  return 0;
}

static address gen_c2i_adapter(MacroAssembler *masm,
                               int total_args_passed,
                               int comp_args_on_stack,
                               const BasicType *sig_bt,
                               const VMRegPair *regs,
                               Label& call_interpreter,
                               const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           align_up(total_args_passed * wordSize, frame::alignment_in_bytes);

  // regular (verified) c2i entry point
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CCR0, call_interpreter);


  // Patch caller's callsite, method_(code) was not NULL which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi0(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi0(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);


  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
  __ resize_frame(-adapter_size, R12_scratch2);

  int st_off = adapter_size - wordSize;

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      Register tmp_reg = value_regs[value_regs_index];
      value_regs_index = (value_regs_index + 1) % num_value_regs;
      // The calling convention produces OptoRegs that ignore the out
      // preserve area (JIT's ABI). We must account for it here.
      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lwz(tmp_reg, ld_off, sender_SP);
      } else {
        __ ld(tmp_reg, ld_off, sender_SP);
      }
      // Pretend stack targets were loaded into tmp_reg.
      r_1 = tmp_reg->as_VMReg();
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ stw(r, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // Longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
          st_off -= wordSize;
        }
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      FloatRegister f = r_1->as_FloatRegister();
      if (!r_2->is_valid()) {
        __ stfs(f, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        // One of these should get known junk...
        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
        st_off -= wordSize;
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }

  // Jump to the interpreter just as if interpreter was doing it.

  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);

  // load TOS
  __ addi(R15_esp, R1_SP, st_off);

  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
  __ bctr();

  return c2i_entrypoint;
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Load method's entry-point from method.
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

  const Register ld_ptr = R15_esp;
  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed*wordSize;

  // Cut-out for having no stack args. Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through a temporary value register.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset -= wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
        ld_offset -= 2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        }
      } else {
        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          ld_offset -= wordSize;
        }
        __ ld(r, ld_offset, ld_ptr);
        ld_offset -= wordSize;
      }

      if (r_1->is_stack()) {
        // Now store value where the compiler expects it
        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;

        if (sig_bt[i] == T_INT || sig_bt[i] == T_FLOAT || sig_bt[i] == T_BOOLEAN ||
            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR || sig_bt[i] == T_BYTE) {
          __ stw(r, st_off, R1_SP);
        } else {
          __ std(r, st_off, R1_SP);
        }
      }
    }
  }

  BLOCK_COMMENT("Store method");
  // Store method into thread->callee_target.
  // We might end up in handle_wrong_method if the callee is
  // deoptimized as we race thru here. If that happens we don't want
  // to take a safepoint because the caller frame will look
  // interpreted and arguments are now "compiled" so it is much better
  // to make this transition invisible to the stack walking
  // code. Unfortunately if we try and find the callee by normal means
  // a safepoint is possible. So we stash the desired callee in the
  // thread and the vm will find it there should this case occur.
  __ std(R19_method, thread_(callee_target));

  // Jump to the compiled code just as if compiled code was doing it.
  __ bctr();
}

AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry;
  address c2i_unverified_entry;
  address c2i_entry;


  // entry: i2c

  __ align(CodeEntryAlignment);
  i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // entry: c2i unverified

  __ align(CodeEntryAlignment);
  BLOCK_COMMENT("c2i unverified entry");
  c2i_unverified_entry = __ pc();

  // inline_cache contains a compiledICHolder
  const Register ic             = R19_method;
  const Register ic_klass       = R11_scratch1;
  const Register receiver_klass = R12_scratch2;
  const Register code           = R21_tmp1;
  const Register ientry         = R23_tmp3;

  assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
  assert(R11_scratch1 == R11, "need prologue scratch register");

  Label call_interpreter;

  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
         "klass offset should reach into any page");
  // Check for NULL argument if we don't have implicit null checks.
  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R3_ARG1);
    } else {
      Label valid;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne_predict_taken(CCR0, valid);
      // We have a null argument, branch to ic_miss_stub.
      __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                       relocInfo::runtime_call_type);
      __ BIND(valid);
    }
  }
  // Assume argument is not NULL, load klass from receiver.
  __ load_klass(receiver_klass, R3_ARG1);

  __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);

  if (TrapBasedICMissChecks) {
    __ trap_ic_miss_check(receiver_klass, ic_klass);
  } else {
    Label valid;
    __ cmpd(CCR0, receiver_klass, ic_klass);
    __ beq_predict_taken(CCR0, valid);
    // We have an unexpected klass, branch to ic_miss_stub.
    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
    __ BIND(valid);
  }

  // Argument is valid and klass is as expected, continue.

  // Extract method from inline cache, verified entry point needs it.
  __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
  assert(R19_method == ic, "the inline cache register is dead here");

  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq_predict_taken(CCR0, call_interpreter);

  // Branch to ic_miss_stub.
  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);

  // entry: c2i

  c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = NULL;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
      __ andi_(R0, R0, JVM_ACC_STATIC);
      __ beq(CCR0, L_skip_barrier); // non-static
    }

    Register klass = R11_scratch1;
    __ load_method_holder(klass, R19_method);
    __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);

    __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
    __ mtctr(klass);
    __ bctr();

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
                                          c2i_no_clinit_check_entry);
}

// An oop arg. Must pass a handle not the oop itself.
static void object_move(MacroAssembler* masm,
                        int frame_size_in_slots,
                        OopMap* oop_map, int oop_handle_offset,
                        bool is_receiver, int* receiver_offset,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
  assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
         "receiver has already been moved");

  // We must pass a handle. First figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // stack to stack or reg

    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    Label skip;
    const int oop_slot_in_callers_frame = reg2slot(src.first());

    guarantee(!is_receiver, "expecting receiver in register");
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));

    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
    __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
    __ cmpdi(CCR0, r_temp_2, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // stack to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      // Nothing to do, r_handle is already the dst register.
    }
  } else {
    // reg to stack or reg
    const Register r_oop    = src.first()->as_Register();
    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    const int oop_slot      = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
                              + oop_handle_offset; // in slots
    const int oop_offset    = oop_slot * VMRegImpl::stack_slot_size;
    Label skip;

    if (is_receiver) {
      *receiver_offset = oop_offset;
    }
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));

    __ std( r_oop, oop_offset, R1_SP);
    __ addi(r_handle, R1_SP, oop_offset);

    __ cmpdi(CCR0, r_oop, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // reg to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // reg to reg
      // Nothing to do, r_handle is already the dst register.
    }
  }
}

static void int_move(MacroAssembler* masm,
                     VMRegPair src, VMRegPair dst,
                     Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid(), "incoming must be int");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ extsw(r_temp, src.first()->as_Register());
    __ std(r_temp, reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    __ extsw(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void long_move(MacroAssembler* masm,
                      VMRegPair src, VMRegPair dst,
                      Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_Register() != src.first()->as_Register())
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void float_move(MacroAssembler* masm,
                       VMRegPair src, VMRegPair dst,
                       Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
  assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
      __ stw(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

static void double_move(MacroAssembler* masm,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = R19_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (is_reference_type(sig_bt[i])) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, reg2offset(r), R1_SP);
          __ verify_oop(temp_reg, FILE_AND_LINE);
        } else {
          __ verify_oop(r->as_Register(), FILE_AND_LINE);
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = R19_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note: This assumes that compiled calling conventions always
      // pass the receiver oop in a register. If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = R11_scratch1;  // TODO (hs24): is R11_scratch1 really free at this point?
      __ ld(receiver_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee. Critical native functions leave the state _in_Java,
// since they cannot stop for GC.
// Some other parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, because it's impossible for them
// to be thrown.
1632 // 1633 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, 1634 const methodHandle& method, 1635 int compile_id, 1636 BasicType *in_sig_bt, 1637 VMRegPair *in_regs, 1638 BasicType ret_type) { 1639 if (method->is_method_handle_intrinsic()) { 1640 vmIntrinsics::ID iid = method->intrinsic_id(); 1641 intptr_t start = (intptr_t)__ pc(); 1642 int vep_offset = ((intptr_t)__ pc()) - start; 1643 gen_special_dispatch(masm, 1644 method, 1645 in_sig_bt, 1646 in_regs); 1647 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period 1648 __ flush(); 1649 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually 1650 return nmethod::new_native_nmethod(method, 1651 compile_id, 1652 masm->code(), 1653 vep_offset, 1654 frame_complete, 1655 stack_slots / VMRegImpl::slots_per_word, 1656 in_ByteSize(-1), 1657 in_ByteSize(-1), 1658 (OopMapSet*)NULL); 1659 } 1660 1661 address native_func = method->native_function(); 1662 assert(native_func != NULL, "must have function"); 1663 1664 // First, create signature for outgoing C call 1665 // -------------------------------------------------------------------------- 1666 1667 int total_in_args = method->size_of_parameters(); 1668 // We have received a description of where all the java args are located 1669 // on entry to the wrapper. We need to convert these args to where 1670 // the jni function will expect them. To figure out where they go 1671 // we convert the java signature to a C signature by inserting 1672 // the hidden arguments as arg[0] and possibly arg[1] (static method) 1673 1674 // Calculate the total number of C arguments and create arrays for the 1675 // signature and the outgoing registers. 1676 // On ppc64, we have two arrays for the outgoing registers, because 1677 // some floating-point arguments must be passed in registers _and_ 1678 // in stack locations. 1679 bool method_is_static = method->is_static(); 1680 int total_c_args = total_in_args + (method_is_static ? 2 : 1); 1681 1682 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1683 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1684 VMRegPair *out_regs2 = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1685 BasicType* in_elem_bt = NULL; 1686 1687 // Create the signature for the C call: 1688 // 1) add the JNIEnv* 1689 // 2) add the class if the method is static 1690 // 3) copy the rest of the incoming signature (shifted by the number of 1691 // hidden arguments). 1692 1693 int argc = 0; 1694 out_sig_bt[argc++] = T_ADDRESS; 1695 if (method->is_static()) { 1696 out_sig_bt[argc++] = T_OBJECT; 1697 } 1698 1699 for (int i = 0; i < total_in_args ; i++ ) { 1700 out_sig_bt[argc++] = in_sig_bt[i]; 1701 } 1702 1703 1704 // Compute the wrapper's frame size. 1705 // -------------------------------------------------------------------------- 1706 1707 // Now figure out where the args must be stored and how much stack space 1708 // they require. 1709 // 1710 // Compute framesize for the wrapper. We need to handlize all oops in 1711 // incoming registers. 1712 // 1713 // Calculate the total number of stack slots we will need: 1714 // 1) abi requirements 1715 // 2) outgoing arguments 1716 // 3) space for inbound oop handle area 1717 // 4) space for handlizing a klass if static method 1718 // 5) space for a lock if synchronized method 1719 // 6) workspace for saving return values, int <-> float reg moves, etc. 
1720 // 7) alignment 1721 // 1722 // Layout of the native wrapper frame: 1723 // (stack grows upwards, memory grows downwards) 1724 // 1725 // NW [ABI_REG_ARGS] <-- 1) R1_SP 1726 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset 1727 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset 1728 // klass <-- 4) R1_SP + klass_offset 1729 // lock <-- 5) R1_SP + lock_offset 1730 // [workspace] <-- 6) R1_SP + workspace_offset 1731 // [alignment] (optional) <-- 7) 1732 // caller [JIT_TOP_ABI_48] <-- r_callers_sp 1733 // 1734 // - *_slot_offset Indicates offset from SP in number of stack slots. 1735 // - *_offset Indicates offset from SP in bytes. 1736 1737 int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) + // 1+2) 1738 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention. 1739 1740 // Now the space for the inbound oop handle area. 1741 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word; 1742 1743 int oop_handle_slot_offset = stack_slots; 1744 stack_slots += total_save_slots; // 3) 1745 1746 int klass_slot_offset = 0; 1747 int klass_offset = -1; 1748 if (method_is_static) { // 4) 1749 klass_slot_offset = stack_slots; 1750 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1751 stack_slots += VMRegImpl::slots_per_word; 1752 } 1753 1754 int lock_slot_offset = 0; 1755 int lock_offset = -1; 1756 if (method->is_synchronized()) { // 5) 1757 lock_slot_offset = stack_slots; 1758 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size; 1759 stack_slots += VMRegImpl::slots_per_word; 1760 } 1761 1762 int workspace_slot_offset = stack_slots; // 6) 1763 stack_slots += 2; 1764 1765 // Now compute actual number of stack words we need. 1766 // Rounding to make stack properly aligned. 1767 stack_slots = align_up(stack_slots, // 7) 1768 frame::alignment_in_bytes / VMRegImpl::stack_slot_size); 1769 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size; 1770 1771 1772 // Now we can start generating code. 1773 // -------------------------------------------------------------------------- 1774 1775 intptr_t start_pc = (intptr_t)__ pc(); 1776 intptr_t vep_start_pc; 1777 intptr_t frame_done_pc; 1778 intptr_t oopmap_pc; 1779 1780 Label ic_miss; 1781 Label handle_pending_exception; 1782 1783 Register r_callers_sp = R21; 1784 Register r_temp_1 = R22; 1785 Register r_temp_2 = R23; 1786 Register r_temp_3 = R24; 1787 Register r_temp_4 = R25; 1788 Register r_temp_5 = R26; 1789 Register r_temp_6 = R27; 1790 Register r_return_pc = R28; 1791 1792 Register r_carg1_jnienv = noreg; 1793 Register r_carg2_classorobject = noreg; 1794 r_carg1_jnienv = out_regs[0].first()->as_Register(); 1795 r_carg2_classorobject = out_regs[1].first()->as_Register(); 1796 1797 1798 // Generate the Unverified Entry Point (UEP). 1799 // -------------------------------------------------------------------------- 1800 assert(start_pc == (intptr_t)__ pc(), "uep must be at start"); 1801 1802 // Check ic: object class == cached class? 1803 if (!method_is_static) { 1804 Register ic = R19_inline_cache_reg; 1805 Register receiver_klass = r_temp_1; 1806 1807 __ cmpdi(CCR0, R3_ARG1, 0); 1808 __ beq(CCR0, ic_miss); 1809 __ verify_oop(R3_ARG1, FILE_AND_LINE); 1810 __ load_klass(receiver_klass, R3_ARG1); 1811 1812 __ cmpd(CCR0, receiver_klass, ic); 1813 __ bne(CCR0, ic_miss); 1814 } 1815 1816 1817 // Generate the Verified Entry Point (VEP). 
1818 // -------------------------------------------------------------------------- 1819 vep_start_pc = (intptr_t)__ pc(); 1820 1821 if (UseRTMLocking) { 1822 // Abort RTM transaction before calling JNI 1823 // because critical section can be large and 1824 // abort anyway. Also nmethod can be deoptimized. 1825 __ tabort_(); 1826 } 1827 1828 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) { 1829 Label L_skip_barrier; 1830 Register klass = r_temp_1; 1831 // Notify OOP recorder (don't need the relocation) 1832 AddressLiteral md = __ constant_metadata_address(method->method_holder()); 1833 __ load_const_optimized(klass, md.value(), R0); 1834 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/); 1835 1836 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0); 1837 __ mtctr(klass); 1838 __ bctr(); 1839 1840 __ bind(L_skip_barrier); 1841 } 1842 1843 __ save_LR_CR(r_temp_1); 1844 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame. 1845 __ mr(r_callers_sp, R1_SP); // Remember frame pointer. 1846 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame. 1847 1848 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1849 bs->nmethod_entry_barrier(masm, r_temp_1); 1850 1851 frame_done_pc = (intptr_t)__ pc(); 1852 1853 __ verify_thread(); 1854 1855 // Native nmethod wrappers never take possession of the oop arguments. 1856 // So the caller will gc the arguments. 1857 // The only thing we need an oopMap for is if the call is static. 1858 // 1859 // An OopMap for lock (and class if static), and one for the VM call itself. 1860 OopMapSet *oop_maps = new OopMapSet(); 1861 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 1862 1863 // Move arguments from register/stack to register/stack. 1864 // -------------------------------------------------------------------------- 1865 // 1866 // We immediately shuffle the arguments so that for any vm call we have 1867 // to make from here on out (sync slow path, jvmti, etc.) we will have 1868 // captured the oops from our caller and have a valid oopMap for them. 1869 // 1870 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 1871 // (derived from JavaThread* which is in R16_thread) and, if static, 1872 // the class mirror instead of a receiver. This pretty much guarantees that 1873 // register layout will not match. We ignore these extra arguments during 1874 // the shuffle. The shuffle is described by the two calling convention 1875 // vectors we have in our possession. We simply walk the java vector to 1876 // get the source locations and the c vector to get the destinations. 1877 1878 // Record sp-based slot for receiver on stack for non-static methods. 1879 int receiver_offset = -1; 1880 1881 // We move the arguments backward because the floating point registers 1882 // destination will always be to a register with a greater or equal 1883 // register number or the stack. 
1884 // in is the index of the incoming Java arguments 1885 // out is the index of the outgoing C arguments 1886 1887 #ifdef ASSERT 1888 bool reg_destroyed[RegisterImpl::number_of_registers]; 1889 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 1890 for (int r = 0 ; r < RegisterImpl::number_of_registers ; r++) { 1891 reg_destroyed[r] = false; 1892 } 1893 for (int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++) { 1894 freg_destroyed[f] = false; 1895 } 1896 #endif // ASSERT 1897 1898 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) { 1899 1900 #ifdef ASSERT 1901 if (in_regs[in].first()->is_Register()) { 1902 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!"); 1903 } else if (in_regs[in].first()->is_FloatRegister()) { 1904 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!"); 1905 } 1906 if (out_regs[out].first()->is_Register()) { 1907 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true; 1908 } else if (out_regs[out].first()->is_FloatRegister()) { 1909 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true; 1910 } 1911 if (out_regs2[out].first()->is_Register()) { 1912 reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true; 1913 } else if (out_regs2[out].first()->is_FloatRegister()) { 1914 freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true; 1915 } 1916 #endif // ASSERT 1917 1918 switch (in_sig_bt[in]) { 1919 case T_BOOLEAN: 1920 case T_CHAR: 1921 case T_BYTE: 1922 case T_SHORT: 1923 case T_INT: 1924 // Move int and do sign extension. 1925 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 1926 break; 1927 case T_LONG: 1928 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 1929 break; 1930 case T_ARRAY: 1931 case T_OBJECT: 1932 object_move(masm, stack_slots, 1933 oop_map, oop_handle_slot_offset, 1934 ((in == 0) && (!method_is_static)), &receiver_offset, 1935 in_regs[in], out_regs[out], 1936 r_callers_sp, r_temp_1, r_temp_2); 1937 break; 1938 case T_VOID: 1939 break; 1940 case T_FLOAT: 1941 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 1942 if (out_regs2[out].first()->is_valid()) { 1943 float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1); 1944 } 1945 break; 1946 case T_DOUBLE: 1947 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 1948 if (out_regs2[out].first()->is_valid()) { 1949 double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1); 1950 } 1951 break; 1952 case T_ADDRESS: 1953 fatal("found type (T_ADDRESS) in java args"); 1954 break; 1955 default: 1956 ShouldNotReachHere(); 1957 break; 1958 } 1959 } 1960 1961 // Pre-load a static method's oop into ARG2. 1962 // Used both by locking code and the normal JNI call code. 1963 if (method_is_static) { 1964 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), 1965 r_carg2_classorobject); 1966 1967 // Now handlize the static class mirror in carg2. It's known not-null. 1968 __ std(r_carg2_classorobject, klass_offset, R1_SP); 1969 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 1970 __ addi(r_carg2_classorobject, R1_SP, klass_offset); 1971 } 1972 1973 // Get JNIEnv* which is first argument to native. 1974 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset())); 1975 1976 // NOTE: 1977 // 1978 // We have all of the arguments setup at this point. 
1979 // We MUST NOT touch any outgoing regs from this point on. 1980 // So if we must call out we must push a new frame. 1981 1982 // Get current pc for oopmap, and load it patchable relative to global toc. 1983 oopmap_pc = (intptr_t) __ pc(); 1984 __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true); 1985 1986 // We use the same pc/oopMap repeatedly when we call out. 1987 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); 1988 1989 // r_return_pc now has the pc loaded that we will use when we finally call 1990 // to native. 1991 1992 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below. 1993 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register"); 1994 1995 # if 0 1996 // DTrace method entry 1997 # endif 1998 1999 // Lock a synchronized method. 2000 // -------------------------------------------------------------------------- 2001 2002 if (method->is_synchronized()) { 2003 ConditionRegister r_flag = CCR1; 2004 Register r_oop = r_temp_4; 2005 const Register r_box = r_temp_5; 2006 Label done, locked; 2007 2008 // Load the oop for the object or class. r_carg2_classorobject contains 2009 // either the handlized oop from the incoming arguments or the handlized 2010 // class mirror (if the method is static). 2011 __ ld(r_oop, 0, r_carg2_classorobject); 2012 2013 // Get the lock box slot's address. 2014 __ addi(r_box, R1_SP, lock_offset); 2015 2016 // Try fastpath for locking. 2017 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2018 __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2019 __ beq(r_flag, locked); 2020 2021 // None of the above fast optimizations worked so we have to get into the 2022 // slow case of monitor enter. Inline a special case of call_VM that 2023 // disallows any pending_exception. 2024 2025 // Save argument registers and leave room for C-compatible ABI_REG_ARGS. 2026 int frame_size = frame::abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes); 2027 __ mr(R11_scratch1, R1_SP); 2028 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2); 2029 2030 // Do the call. 2031 __ set_last_Java_frame(R11_scratch1, r_return_pc); 2032 assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register"); 2033 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread); 2034 __ reset_last_Java_frame(); 2035 2036 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2); 2037 2038 __ asm_assert_mem8_is_zero(thread_(pending_exception), 2039 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C"); 2040 2041 __ bind(locked); 2042 } 2043 2044 // Use that pc we placed in r_return_pc a while back as the current frame anchor. 2045 __ set_last_Java_frame(R1_SP, r_return_pc); 2046 2047 // Publish thread state 2048 // -------------------------------------------------------------------------- 2049 2050 // Transition from _thread_in_Java to _thread_in_native. 
2051 __ li(R0, _thread_in_native); 2052 __ release(); 2053 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2054 __ stw(R0, thread_(thread_state)); 2055 2056 2057 // The JNI call 2058 // -------------------------------------------------------------------------- 2059 #if defined(ABI_ELFv2) 2060 __ call_c(native_func, relocInfo::runtime_call_type); 2061 #else 2062 FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func; 2063 __ call_c(fd_native_method, relocInfo::runtime_call_type); 2064 #endif 2065 2066 2067 // Now, we are back from the native code. 2068 2069 2070 // Unpack the native result. 2071 // -------------------------------------------------------------------------- 2072 2073 // For int-types, we do any needed sign-extension required. 2074 // Care must be taken that the return values (R3_RET and F1_RET) 2075 // will survive any VM calls for blocking or unlocking. 2076 // An OOP result (handle) is done specially in the slow-path code. 2077 2078 switch (ret_type) { 2079 case T_VOID: break; // Nothing to do! 2080 case T_FLOAT: break; // Got it where we want it (unless slow-path). 2081 case T_DOUBLE: break; // Got it where we want it (unless slow-path). 2082 case T_LONG: break; // Got it where we want it (unless slow-path). 2083 case T_OBJECT: break; // Really a handle. 2084 // Cannot de-handlize until after reclaiming jvm_lock. 2085 case T_ARRAY: break; 2086 2087 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1) 2088 Label skip_modify; 2089 __ cmpwi(CCR0, R3_RET, 0); 2090 __ beq(CCR0, skip_modify); 2091 __ li(R3_RET, 1); 2092 __ bind(skip_modify); 2093 break; 2094 } 2095 case T_BYTE: { // sign extension 2096 __ extsb(R3_RET, R3_RET); 2097 break; 2098 } 2099 case T_CHAR: { // unsigned result 2100 __ andi(R3_RET, R3_RET, 0xffff); 2101 break; 2102 } 2103 case T_SHORT: { // sign extension 2104 __ extsh(R3_RET, R3_RET); 2105 break; 2106 } 2107 case T_INT: // nothing to do 2108 break; 2109 default: 2110 ShouldNotReachHere(); 2111 break; 2112 } 2113 2114 Label after_transition; 2115 2116 // Publish thread state 2117 // -------------------------------------------------------------------------- 2118 2119 // Switch thread to "native transition" state before reading the 2120 // synchronization state. This additional state is necessary because reading 2121 // and testing the synchronization state is not atomic w.r.t. GC, as this 2122 // scenario demonstrates: 2123 // - Java thread A, in _thread_in_native state, loads _not_synchronized 2124 // and is preempted. 2125 // - VM thread changes sync state to synchronizing and suspends threads 2126 // for GC. 2127 // - Thread A is resumed to finish this native method, but doesn't block 2128 // here since it didn't see any synchronization in progress, and escapes. 2129 2130 // Transition from _thread_in_native to _thread_in_native_trans. 2131 __ li(R0, _thread_in_native_trans); 2132 __ release(); 2133 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2134 __ stw(R0, thread_(thread_state)); 2135 2136 2137 // Must we block? 2138 // -------------------------------------------------------------------------- 2139 2140 // Block, if necessary, before resuming in _thread_in_Java state. 2141 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2142 { 2143 Label no_block, sync; 2144 2145 // Force this write out before the read below. 
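// Why a full fence here: the store of _thread_in_native_trans above and the
// safepoint/suspend-state reads below form a store-then-load pattern. If the
// CPU let the load pass the store, this thread could miss an in-progress
// safepoint while the VM thread could still observe the old thread state.
// In portable terms (illustration only; the names are made up and the VM
// uses plain fields with explicit barriers rather than std::atomic):
//
//   std::atomic<int>  thread_state;   // stand-in for JavaThread::_thread_state
//   std::atomic<bool> poll_armed;     // stand-in for the safepoint poll word
//   bool must_block(int native_trans) {
//     thread_state.store(native_trans, std::memory_order_seq_cst);
//     return poll_armed.load(std::memory_order_seq_cst);  // load must not pass the store
//   }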
2146 __ fence(); 2147 2148 Register sync_state_addr = r_temp_4; 2149 Register sync_state = r_temp_5; 2150 Register suspend_flags = r_temp_6; 2151 2152 // No synchronization in progress nor yet synchronized 2153 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path). 2154 __ safepoint_poll(sync, sync_state, true /* at_return */, false /* in_nmethod */); 2155 2156 // Not suspended. 2157 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size"); 2158 __ lwz(suspend_flags, thread_(suspend_flags)); 2159 __ cmpwi(CCR1, suspend_flags, 0); 2160 __ beq(CCR1, no_block); 2161 2162 // Block. Save any potential method result value before the operation and 2163 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this 2164 // lets us share the oopMap we used when we went native rather than create 2165 // a distinct one for this pc. 2166 __ bind(sync); 2167 __ isync(); 2168 2169 address entry_point = 2170 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans); 2171 save_native_result(masm, ret_type, workspace_slot_offset); 2172 __ call_VM_leaf(entry_point, R16_thread); 2173 restore_native_result(masm, ret_type, workspace_slot_offset); 2174 2175 __ bind(no_block); 2176 2177 // Publish thread state. 2178 // -------------------------------------------------------------------------- 2179 2180 // Thread state is thread_in_native_trans. Any safepoint blocking has 2181 // already happened so we can now change state to _thread_in_Java. 2182 2183 // Transition from _thread_in_native_trans to _thread_in_Java. 2184 __ li(R0, _thread_in_Java); 2185 __ lwsync(); // Acquire safepoint and suspend state, release thread state. 2186 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2187 __ stw(R0, thread_(thread_state)); 2188 __ bind(after_transition); 2189 } 2190 2191 // Reguard any pages if necessary. 2192 // -------------------------------------------------------------------------- 2193 2194 Label no_reguard; 2195 __ lwz(r_temp_1, thread_(stack_guard_state)); 2196 __ cmpwi(CCR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled); 2197 __ bne(CCR0, no_reguard); 2198 2199 save_native_result(masm, ret_type, workspace_slot_offset); 2200 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); 2201 restore_native_result(masm, ret_type, workspace_slot_offset); 2202 2203 __ bind(no_reguard); 2204 2205 2206 // Unlock 2207 // -------------------------------------------------------------------------- 2208 2209 if (method->is_synchronized()) { 2210 2211 ConditionRegister r_flag = CCR1; 2212 const Register r_oop = r_temp_4; 2213 const Register r_box = r_temp_5; 2214 const Register r_exception = r_temp_6; 2215 Label done; 2216 2217 // Get oop and address of lock object box. 2218 if (method_is_static) { 2219 assert(klass_offset != -1, ""); 2220 __ ld(r_oop, klass_offset, R1_SP); 2221 } else { 2222 assert(receiver_offset != -1, ""); 2223 __ ld(r_oop, receiver_offset, R1_SP); 2224 } 2225 __ addi(r_box, R1_SP, lock_offset); 2226 2227 // Try fastpath for unlocking. 2228 __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2229 __ beq(r_flag, done); 2230 2231 // Save and restore any potential method result value around the unlocking operation. 2232 save_native_result(masm, ret_type, workspace_slot_offset); 2233 2234 // Must save pending exception around the slow-path VM call. 
Since it's a 2235 // leaf call, the pending exception (if any) can be kept in a register. 2236 __ ld(r_exception, thread_(pending_exception)); 2237 assert(r_exception->is_nonvolatile(), "exception register must be non-volatile"); 2238 __ li(R0, 0); 2239 __ std(R0, thread_(pending_exception)); 2240 2241 // Slow case of monitor enter. 2242 // Inline a special case of call_VM that disallows any pending_exception. 2243 // Arguments are (oop obj, BasicLock* lock, JavaThread* thread). 2244 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread); 2245 2246 __ asm_assert_mem8_is_zero(thread_(pending_exception), 2247 "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C"); 2248 2249 restore_native_result(masm, ret_type, workspace_slot_offset); 2250 2251 // Check_forward_pending_exception jump to forward_exception if any pending 2252 // exception is set. The forward_exception routine expects to see the 2253 // exception in pending_exception and not in a register. Kind of clumsy, 2254 // since all folks who branch to forward_exception must have tested 2255 // pending_exception first and hence have it in a register already. 2256 __ std(r_exception, thread_(pending_exception)); 2257 2258 __ bind(done); 2259 } 2260 2261 # if 0 2262 // DTrace method exit 2263 # endif 2264 2265 // Clear "last Java frame" SP and PC. 2266 // -------------------------------------------------------------------------- 2267 2268 __ reset_last_Java_frame(); 2269 2270 // Unbox oop result, e.g. JNIHandles::resolve value. 2271 // -------------------------------------------------------------------------- 2272 2273 if (is_reference_type(ret_type)) { 2274 __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, MacroAssembler::PRESERVATION_NONE); 2275 } 2276 2277 if (CheckJNICalls) { 2278 // clear_pending_jni_exception_check 2279 __ load_const_optimized(R0, 0L); 2280 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread); 2281 } 2282 2283 // Reset handle block. 2284 // -------------------------------------------------------------------------- 2285 __ ld(r_temp_1, thread_(active_handles)); 2286 // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size"); 2287 __ li(r_temp_2, 0); 2288 __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1); 2289 2290 2291 // Check for pending exceptions. 2292 // -------------------------------------------------------------------------- 2293 __ ld(r_temp_2, thread_(pending_exception)); 2294 __ cmpdi(CCR0, r_temp_2, 0); 2295 __ bne(CCR0, handle_pending_exception); 2296 2297 // Return 2298 // -------------------------------------------------------------------------- 2299 2300 __ pop_frame(); 2301 __ restore_LR_CR(R11); 2302 __ blr(); 2303 2304 2305 // Handler for pending exceptions (out-of-line). 2306 // -------------------------------------------------------------------------- 2307 // Since this is a native call, we know the proper exception handler 2308 // is the empty function. We just pop this frame and then jump to 2309 // forward_exception_entry. 2310 __ bind(handle_pending_exception); 2311 2312 __ pop_frame(); 2313 __ restore_LR_CR(R11); 2314 __ b64_patchable((address)StubRoutines::forward_exception_entry(), 2315 relocInfo::runtime_call_type); 2316 2317 // Handler for a cache miss (out-of-line). 
2318 // -------------------------------------------------------------------------- 2319 2320 if (!method_is_static) { 2321 __ bind(ic_miss); 2322 2323 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), 2324 relocInfo::runtime_call_type); 2325 } 2326 2327 // Done. 2328 // -------------------------------------------------------------------------- 2329 2330 __ flush(); 2331 2332 nmethod *nm = nmethod::new_native_nmethod(method, 2333 compile_id, 2334 masm->code(), 2335 vep_start_pc-start_pc, 2336 frame_done_pc-start_pc, 2337 stack_slots / VMRegImpl::slots_per_word, 2338 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2339 in_ByteSize(lock_offset), 2340 oop_maps); 2341 2342 return nm; 2343 } 2344 2345 // This function returns the adjust size (in number of words) to a c2i adapter 2346 // activation for use during deoptimization. 2347 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2348 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes); 2349 } 2350 2351 uint SharedRuntime::in_preserve_stack_slots() { 2352 return frame::jit_in_preserve_size / VMRegImpl::stack_slot_size; 2353 } 2354 2355 uint SharedRuntime::out_preserve_stack_slots() { 2356 #if defined(COMPILER1) || defined(COMPILER2) 2357 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size; 2358 #else 2359 return 0; 2360 #endif 2361 } 2362 2363 #if defined(COMPILER1) || defined(COMPILER2) 2364 // Frame generation for deopt and uncommon trap blobs. 2365 static void push_skeleton_frame(MacroAssembler* masm, bool deopt, 2366 /* Read */ 2367 Register unroll_block_reg, 2368 /* Update */ 2369 Register frame_sizes_reg, 2370 Register number_of_frames_reg, 2371 Register pcs_reg, 2372 /* Invalidate */ 2373 Register frame_size_reg, 2374 Register pc_reg) { 2375 2376 __ ld(pc_reg, 0, pcs_reg); 2377 __ ld(frame_size_reg, 0, frame_sizes_reg); 2378 __ std(pc_reg, _abi0(lr), R1_SP); 2379 __ push_frame(frame_size_reg, R0/*tmp*/); 2380 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP); 2381 __ addi(number_of_frames_reg, number_of_frames_reg, -1); 2382 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize); 2383 __ addi(pcs_reg, pcs_reg, wordSize); 2384 } 2385 2386 // Loop through the UnrollBlock info and create new frames. 2387 static void push_skeleton_frames(MacroAssembler* masm, bool deopt, 2388 /* read */ 2389 Register unroll_block_reg, 2390 /* invalidate */ 2391 Register frame_sizes_reg, 2392 Register number_of_frames_reg, 2393 Register pcs_reg, 2394 Register frame_size_reg, 2395 Register pc_reg) { 2396 Label loop; 2397 2398 // _number_of_frames is of type int (deoptimization.hpp) 2399 __ lwa(number_of_frames_reg, 2400 Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), 2401 unroll_block_reg); 2402 __ ld(pcs_reg, 2403 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), 2404 unroll_block_reg); 2405 __ ld(frame_sizes_reg, 2406 Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), 2407 unroll_block_reg); 2408 2409 // stack: (caller_of_deoptee, ...). 2410 2411 // At this point we either have an interpreter frame or a compiled 2412 // frame on top of stack. If it is a compiled frame we push a new c2i 2413 // adapter here 2414 2415 // Memorize top-frame stack-pointer. 2416 __ mr(frame_size_reg/*old_sp*/, R1_SP); 2417 2418 // Resize interpreter top frame OR C2I adapter. 
2419 2420 // At this moment, the top frame (which is the caller of the deoptee) is 2421 // an interpreter frame or a newly pushed C2I adapter or an entry frame. 2422 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the 2423 // outgoing arguments. 2424 // 2425 // In order to push the interpreter frame for the deoptee, we need to 2426 // resize the top frame such that we are able to place the deoptee's 2427 // locals in the frame. 2428 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI 2429 // into a valid PARENT_IJAVA_FRAME_ABI. 2430 2431 __ lwa(R11_scratch1, 2432 Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), 2433 unroll_block_reg); 2434 __ neg(R11_scratch1, R11_scratch1); 2435 2436 // R11_scratch1 contains size of locals for frame resizing. 2437 // R12_scratch2 contains top frame's lr. 2438 2439 // Resize frame by complete frame size prevents TOC from being 2440 // overwritten by locals. A more stack space saving way would be 2441 // to copy the TOC to its location in the new abi. 2442 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size); 2443 2444 // now, resize the frame 2445 __ resize_frame(R11_scratch1, pc_reg/*tmp*/); 2446 2447 // In the case where we have resized a c2i frame above, the optional 2448 // alignment below the locals has size 32 (why?). 2449 __ std(R12_scratch2, _abi0(lr), R1_SP); 2450 2451 // Initialize initial_caller_sp. 2452 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP); 2453 2454 #ifdef ASSERT 2455 // Make sure that there is at least one entry in the array. 2456 __ cmpdi(CCR0, number_of_frames_reg, 0); 2457 __ asm_assert_ne("array_size must be > 0"); 2458 #endif 2459 2460 // Now push the new interpreter frames. 2461 // 2462 __ bind(loop); 2463 // Allocate a new frame, fill in the pc. 2464 push_skeleton_frame(masm, deopt, 2465 unroll_block_reg, 2466 frame_sizes_reg, 2467 number_of_frames_reg, 2468 pcs_reg, 2469 frame_size_reg, 2470 pc_reg); 2471 __ cmpdi(CCR0, number_of_frames_reg, 0); 2472 __ bne(CCR0, loop); 2473 2474 // Get the return address pointing into the frame manager. 2475 __ ld(R0, 0, pcs_reg); 2476 // Store it in the top interpreter frame. 2477 __ std(R0, _abi0(lr), R1_SP); 2478 // Initialize frame_manager_lr of interpreter top frame. 2479 } 2480 #endif 2481 2482 void SharedRuntime::generate_deopt_blob() { 2483 // Allocate space for the code 2484 ResourceMark rm; 2485 // Setup code generation tools 2486 CodeBuffer buffer("deopt_blob", 2048, 1024); 2487 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 2488 Label exec_mode_initialized; 2489 int frame_size_in_words; 2490 OopMap* map = NULL; 2491 OopMapSet *oop_maps = new OopMapSet(); 2492 2493 // size of ABI112 plus spill slots for R3_RET and F1_RET. 2494 const int frame_size_in_bytes = frame::abi_reg_args_spill_size; 2495 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 2496 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info. 2497 2498 const Register exec_mode_reg = R21_tmp1; 2499 2500 const address start = __ pc(); 2501 2502 #if defined(COMPILER1) || defined(COMPILER2) 2503 // -------------------------------------------------------------------------- 2504 // Prolog for non exception case! 2505 2506 // We have been called from the deopt handler of the deoptee. 2507 // 2508 // deoptee: 2509 // ... 2510 // call X 2511 // ... 2512 // deopt_handler: call_deopt_stub 2513 // cur. return pc --> ... 
2514 // 2515 // So currently SR_LR points behind the call in the deopt handler. 2516 // We adjust it such that it points to the start of the deopt handler. 2517 // The return_pc has been stored in the frame of the deoptee and 2518 // will replace the address of the deopt_handler in the call 2519 // to Deoptimization::fetch_unroll_info below. 2520 // We can't grab a free register here, because all registers may 2521 // contain live values, so let the RegisterSaver do the adjustment 2522 // of the return pc. 2523 const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size; 2524 2525 // Push the "unpack frame" 2526 // Save everything in sight. 2527 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2528 &first_frame_size_in_bytes, 2529 /*generate_oop_map=*/ true, 2530 return_pc_adjustment_no_exception, 2531 RegisterSaver::return_pc_is_lr); 2532 assert(map != NULL, "OopMap must have been created"); 2533 2534 __ li(exec_mode_reg, Deoptimization::Unpack_deopt); 2535 // Save exec mode for unpack_frames. 2536 __ b(exec_mode_initialized); 2537 2538 // -------------------------------------------------------------------------- 2539 // Prolog for exception case 2540 2541 // An exception is pending. 2542 // We have been called with a return (interpreter) or a jump (exception blob). 2543 // 2544 // - R3_ARG1: exception oop 2545 // - R4_ARG2: exception pc 2546 2547 int exception_offset = __ pc() - start; 2548 2549 BLOCK_COMMENT("Prolog for exception case"); 2550 2551 // Store exception oop and pc in thread (location known to GC). 2552 // This is needed since the call to "fetch_unroll_info()" may safepoint. 2553 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2554 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2555 __ std(R4_ARG2, _abi0(lr), R1_SP); 2556 2557 // Vanilla deoptimization with an exception pending in exception_oop. 2558 int exception_in_tls_offset = __ pc() - start; 2559 2560 // Push the "unpack frame". 2561 // Save everything in sight. 2562 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2563 &first_frame_size_in_bytes, 2564 /*generate_oop_map=*/ false, 2565 /*return_pc_adjustment_exception=*/ 0, 2566 RegisterSaver::return_pc_is_pre_saved); 2567 2568 // Deopt during an exception. Save exec mode for unpack_frames. 2569 __ li(exec_mode_reg, Deoptimization::Unpack_exception); 2570 2571 // fall through 2572 2573 int reexecute_offset = 0; 2574 #ifdef COMPILER1 2575 __ b(exec_mode_initialized); 2576 2577 // Reexecute entry, similar to c2 uncommon trap 2578 reexecute_offset = __ pc() - start; 2579 2580 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2581 &first_frame_size_in_bytes, 2582 /*generate_oop_map=*/ false, 2583 /*return_pc_adjustment_reexecute=*/ 0, 2584 RegisterSaver::return_pc_is_pre_saved); 2585 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute); 2586 #endif 2587 2588 // -------------------------------------------------------------------------- 2589 __ BIND(exec_mode_initialized); 2590 2591 { 2592 const Register unroll_block_reg = R22_tmp2; 2593 2594 // We need to set `last_Java_frame' because `fetch_unroll_info' will 2595 // call `last_Java_frame()'. The value of the pc in the frame is not 2596 // particularly important. It just needs to identify this blob. 2597 __ set_last_Java_frame(R1_SP, noreg); 2598 2599 // With EscapeAnalysis turned on, this call may safepoint! 
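// fetch_unroll_info returns a Deoptimization::UnrollBlock describing the
// frames to rebuild: a frame count plus parallel arrays of frame sizes and
// frame pcs, which push_skeleton_frames() below walks one entry at a time.
// Conceptual sketch of that walk (plain integers, illustration only):
//
//   struct UnrollSketch { int number_of_frames; const long* frame_sizes; const long* frame_pcs; };
//   long total_skeleton_size(const UnrollSketch& u) {
//     long sum = 0;
//     for (int i = 0; i < u.number_of_frames; i++) {
//       sum += u.frame_sizes[i];   // one skeletal frame per entry, pc taken from frame_pcs[i]
//     }
//     return sum;
//   }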
2600 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg); 2601 address calls_return_pc = __ last_calls_return_pc(); 2602 // Set an oopmap for the call site that describes all our saved registers. 2603 oop_maps->add_gc_map(calls_return_pc - start, map); 2604 2605 __ reset_last_Java_frame(); 2606 // Save the return value. 2607 __ mr(unroll_block_reg, R3_RET); 2608 2609 // Restore only the result registers that have been saved 2610 // by save_volatile_registers(...). 2611 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes); 2612 2613 // reload the exec mode from the UnrollBlock (it might have changed) 2614 __ lwz(exec_mode_reg, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg); 2615 // In excp_deopt_mode, restore and clear exception oop which we 2616 // stored in the thread during exception entry above. The exception 2617 // oop will be the return value of this stub. 2618 Label skip_restore_excp; 2619 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception); 2620 __ bne(CCR0, skip_restore_excp); 2621 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2622 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2623 __ li(R0, 0); 2624 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2625 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2626 __ BIND(skip_restore_excp); 2627 2628 __ pop_frame(); 2629 2630 // stack: (deoptee, optional i2c, caller of deoptee, ...). 2631 2632 // pop the deoptee's frame 2633 __ pop_frame(); 2634 2635 // stack: (caller_of_deoptee, ...). 2636 2637 // Loop through the `UnrollBlock' info and create interpreter frames. 2638 push_skeleton_frames(masm, true/*deopt*/, 2639 unroll_block_reg, 2640 R23_tmp3, 2641 R24_tmp4, 2642 R25_tmp5, 2643 R26_tmp6, 2644 R27_tmp7); 2645 2646 // stack: (skeletal interpreter frame, ..., optional skeletal 2647 // interpreter frame, optional c2i, caller of deoptee, ...). 2648 } 2649 2650 // push an `unpack_frame' taking care of float / int return values. 2651 __ push_frame(frame_size_in_bytes, R0/*tmp*/); 2652 2653 // stack: (unpack frame, skeletal interpreter frame, ..., optional 2654 // skeletal interpreter frame, optional c2i, caller of deoptee, 2655 // ...). 2656 2657 // Spill live volatile registers since we'll do a call. 2658 __ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 2659 __ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 2660 2661 // Let the unpacker layout information in the skeletal frames just 2662 // allocated. 2663 __ get_PC_trash_LR(R3_RET); 2664 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET); 2665 // This is a call to a LEAF method, so no oop map is required. 2666 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 2667 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/); 2668 __ reset_last_Java_frame(); 2669 2670 // Restore the volatiles saved above. 2671 __ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); 2672 __ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); 2673 2674 // Pop the unpack frame. 2675 __ pop_frame(); 2676 __ restore_LR_CR(R0); 2677 2678 // stack: (top interpreter frame, ..., optional interpreter frame, 2679 // optional c2i, caller of deoptee, ...). 2680 2681 // Initialize R14_state. 
2682 __ restore_interpreter_state(R11_scratch1); 2683 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 2684 2685 // Return to the interpreter entry point. 2686 __ blr(); 2687 __ flush(); 2688 #else // COMPILER2 2689 __ unimplemented("deopt blob needed only with compiler"); 2690 int exception_offset = __ pc() - start; 2691 #endif // COMPILER2 2692 2693 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 2694 reexecute_offset, first_frame_size_in_bytes / wordSize); 2695 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 2696 } 2697 2698 #ifdef COMPILER2 2699 void SharedRuntime::generate_uncommon_trap_blob() { 2700 // Allocate space for the code. 2701 ResourceMark rm; 2702 // Setup code generation tools. 2703 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024); 2704 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 2705 address start = __ pc(); 2706 2707 if (UseRTMLocking) { 2708 // Abort RTM transaction before possible nmethod deoptimization. 2709 __ tabort_(); 2710 } 2711 2712 Register unroll_block_reg = R21_tmp1; 2713 Register klass_index_reg = R22_tmp2; 2714 Register unc_trap_reg = R23_tmp3; 2715 2716 OopMapSet* oop_maps = new OopMapSet(); 2717 int frame_size_in_bytes = frame::abi_reg_args_size; 2718 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 2719 2720 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 2721 2722 // Push a dummy `unpack_frame' and call 2723 // `Deoptimization::uncommon_trap' to pack the compiled frame into a 2724 // vframe array and return the `UnrollBlock' information. 2725 2726 // Save LR to compiled frame. 2727 __ save_LR_CR(R11_scratch1); 2728 2729 // Push an "uncommon_trap" frame. 2730 __ push_frame_reg_args(0, R11_scratch1); 2731 2732 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...). 2733 2734 // Set the `unpack_frame' as last_Java_frame. 2735 // `Deoptimization::uncommon_trap' expects it and considers its 2736 // sender frame as the deoptee frame. 2737 // Remember the offset of the instruction whose address will be 2738 // moved to R11_scratch1. 2739 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); 2740 2741 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 2742 2743 __ mr(klass_index_reg, R3); 2744 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap); 2745 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), 2746 R16_thread, klass_index_reg, R5_ARG3); 2747 2748 // Set an oopmap for the call site. 2749 oop_maps->add_gc_map(gc_map_pc - start, map); 2750 2751 __ reset_last_Java_frame(); 2752 2753 // Pop the `unpack frame'. 2754 __ pop_frame(); 2755 2756 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 2757 2758 // Save the return value. 2759 __ mr(unroll_block_reg, R3_RET); 2760 2761 // Pop the uncommon_trap frame. 2762 __ pop_frame(); 2763 2764 // stack: (caller_of_deoptee, ...). 2765 2766 #ifdef ASSERT 2767 __ lwz(R22_tmp2, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg); 2768 __ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap); 2769 __ asm_assert_eq("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap"); 2770 #endif 2771 2772 // Allocate new interpreter frame(s) and possibly a c2i adapter 2773 // frame. 
2774 push_skeleton_frames(masm, false/*deopt*/, 2775 unroll_block_reg, 2776 R22_tmp2, 2777 R23_tmp3, 2778 R24_tmp4, 2779 R25_tmp5, 2780 R26_tmp6); 2781 2782 // stack: (skeletal interpreter frame, ..., optional skeletal 2783 // interpreter frame, optional c2i, caller of deoptee, ...). 2784 2785 // Push a dummy `unpack_frame' taking care of float return values. 2786 // Call `Deoptimization::unpack_frames' to layout information in the 2787 // interpreter frames just created. 2788 2789 // Push a simple "unpack frame" here. 2790 __ push_frame_reg_args(0, R11_scratch1); 2791 2792 // stack: (unpack frame, skeletal interpreter frame, ..., optional 2793 // skeletal interpreter frame, optional c2i, caller of deoptee, 2794 // ...). 2795 2796 // Set the "unpack_frame" as last_Java_frame. 2797 __ get_PC_trash_LR(R11_scratch1); 2798 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 2799 2800 // Indicate it is the uncommon trap case. 2801 __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap); 2802 // Let the unpacker layout information in the skeletal frames just 2803 // allocated. 2804 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 2805 R16_thread, unc_trap_reg); 2806 2807 __ reset_last_Java_frame(); 2808 // Pop the `unpack frame'. 2809 __ pop_frame(); 2810 // Restore LR from top interpreter frame. 2811 __ restore_LR_CR(R11_scratch1); 2812 2813 // stack: (top interpreter frame, ..., optional interpreter frame, 2814 // optional c2i, caller of deoptee, ...). 2815 2816 __ restore_interpreter_state(R11_scratch1); 2817 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 2818 2819 // Return to the interpreter entry point. 2820 __ blr(); 2821 2822 masm->flush(); 2823 2824 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize); 2825 } 2826 #endif // COMPILER2 2827 2828 // Generate a special Compile2Runtime blob that saves all registers, and setup oopmap. 2829 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) { 2830 assert(StubRoutines::forward_exception_entry() != NULL, 2831 "must be generated before"); 2832 2833 ResourceMark rm; 2834 OopMapSet *oop_maps = new OopMapSet(); 2835 OopMap* map; 2836 2837 // Allocate space for the code. Setup code generation tools. 2838 CodeBuffer buffer("handler_blob", 2048, 1024); 2839 MacroAssembler* masm = new MacroAssembler(&buffer); 2840 2841 address start = __ pc(); 2842 int frame_size_in_bytes = 0; 2843 2844 RegisterSaver::ReturnPCLocation return_pc_location; 2845 bool cause_return = (poll_type == POLL_AT_RETURN); 2846 if (cause_return) { 2847 // Nothing to do here. The frame has already been popped in MachEpilogNode. 2848 // Register LR already contains the return pc. 2849 return_pc_location = RegisterSaver::return_pc_is_pre_saved; 2850 } else { 2851 // Use thread()->saved_exception_pc() as return pc. 2852 return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc; 2853 } 2854 2855 if (UseRTMLocking) { 2856 // Abort RTM transaction before calling runtime 2857 // because critical section can be large and so 2858 // will abort anyway. Also nmethod can be deoptimized. 2859 __ tabort_(); 2860 } 2861 2862 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP); 2863 2864 // Save registers, fpu state, and flags. Set R31 = return pc. 
2865 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2866 &frame_size_in_bytes, 2867 /*generate_oop_map=*/ true, 2868 /*return_pc_adjustment=*/0, 2869 return_pc_location, save_vectors); 2870 2871 // The following is basically a call_VM. However, we need the precise 2872 // address of the call in order to generate an oopmap. Hence, we do all the 2873 // work ourselves. 2874 __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg); 2875 2876 // The return address must always be correct so that the frame constructor 2877 // never sees an invalid pc. 2878 2879 // Do the call 2880 __ call_VM_leaf(call_ptr, R16_thread); 2881 address calls_return_pc = __ last_calls_return_pc(); 2882 2883 // Set an oopmap for the call site. This oopmap will map all 2884 // oop-registers and debug-info registers as callee-saved. This 2885 // will allow deoptimization at this safepoint to find all possible 2886 // debug-info recordings, as well as let GC find all oops. 2887 oop_maps->add_gc_map(calls_return_pc - start, map); 2888 2889 Label noException; 2890 2891 // Clear the last Java frame. 2892 __ reset_last_Java_frame(); 2893 2894 BLOCK_COMMENT(" Check pending exception."); 2895 const Register pending_exception = R0; 2896 __ ld(pending_exception, thread_(pending_exception)); 2897 __ cmpdi(CCR0, pending_exception, 0); 2898 __ beq(CCR0, noException); 2899 2900 // Exception pending 2901 RegisterSaver::restore_live_registers_and_pop_frame(masm, 2902 frame_size_in_bytes, 2903 /*restore_ctr=*/true, save_vectors); 2904 2905 BLOCK_COMMENT(" Jump to forward_exception_entry."); 2906 // Jump to forward_exception_entry, with the issuing PC in LR 2907 // so it looks like the original nmethod called forward_exception_entry. 2908 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 2909 2910 // No exception case. 2911 __ BIND(noException); 2912 2913 if (!cause_return) { 2914 Label no_adjust; 2915 // If our stashed return pc was modified by the runtime we avoid touching it 2916 __ ld(R0, frame_size_in_bytes + _abi0(lr), R1_SP); 2917 __ cmpd(CCR0, R0, R31); 2918 __ bne(CCR0, no_adjust); 2919 2920 // Adjust return pc forward to step over the safepoint poll instruction 2921 __ addi(R31, R31, 4); 2922 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP); 2923 2924 __ bind(no_adjust); 2925 } 2926 2927 // Normal exit, restore registers and exit. 2928 RegisterSaver::restore_live_registers_and_pop_frame(masm, 2929 frame_size_in_bytes, 2930 /*restore_ctr=*/true, save_vectors); 2931 2932 __ blr(); 2933 2934 // Make sure all code is generated 2935 masm->flush(); 2936 2937 // Fill-out other meta info 2938 // CodeBlob frame size is in words. 2939 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize); 2940 } 2941 2942 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss) 2943 // 2944 // Generate a stub that calls into the vm to find out the proper destination 2945 // of a java call. All the argument registers are live at this point 2946 // but since this is generic code we don't know what they are and the caller 2947 // must do any gc of the args. 
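// In rough, hand-written terms the stub generated below does the following:
// save all argument registers, ask the VM for the real destination (returned
// in R3_RET, with the resolved Method* delivered via get_vm_result_2 into
// R19_method), then continue at that destination with the arguments intact.
// Stand-alone sketch of the control flow (illustration only; the names below
// are invented for this comment, and the real stub forwards pending
// exceptions instead of returning):
#if 0
typedef void (*java_entry_t)();
static void resolved_callee() { /* placeholder for the real target */ }
static java_entry_t resolve_call_site() { return resolved_callee; }  // stands in for the VM call
static void resolve_stub_sketch() {
  java_entry_t dest = resolve_call_site();  // R3_RET
  dest();                                   // mtctr(R3_RET); bctr in the generated code
}
#endif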
2948 // 2949 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { 2950 2951 // allocate space for the code 2952 ResourceMark rm; 2953 2954 CodeBuffer buffer(name, 1000, 512); 2955 MacroAssembler* masm = new MacroAssembler(&buffer); 2956 2957 int frame_size_in_bytes; 2958 2959 OopMapSet *oop_maps = new OopMapSet(); 2960 OopMap* map = NULL; 2961 2962 address start = __ pc(); 2963 2964 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2965 &frame_size_in_bytes, 2966 /*generate_oop_map*/ true, 2967 /*return_pc_adjustment*/ 0, 2968 RegisterSaver::return_pc_is_lr); 2969 2970 // Use noreg as last_Java_pc, the return pc will be reconstructed 2971 // from the physical frame. 2972 __ set_last_Java_frame(/*sp*/R1_SP, noreg); 2973 2974 int frame_complete = __ offset(); 2975 2976 // Pass R19_method as 2nd (optional) argument, used by 2977 // counter_overflow_stub. 2978 __ call_VM_leaf(destination, R16_thread, R19_method); 2979 address calls_return_pc = __ last_calls_return_pc(); 2980 // Set an oopmap for the call site. 2981 // We need this not only for callee-saved registers, but also for volatile 2982 // registers that the compiler might be keeping live across a safepoint. 2983 // Create the oopmap for the call's return pc. 2984 oop_maps->add_gc_map(calls_return_pc - start, map); 2985 2986 // R3_RET contains the address we are going to jump to assuming no exception got installed. 2987 2988 // clear last_Java_sp 2989 __ reset_last_Java_frame(); 2990 2991 // Check for pending exceptions. 2992 BLOCK_COMMENT("Check for pending exceptions."); 2993 Label pending; 2994 __ ld(R11_scratch1, thread_(pending_exception)); 2995 __ cmpdi(CCR0, R11_scratch1, 0); 2996 __ bne(CCR0, pending); 2997 2998 __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame. 2999 3000 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false); 3001 3002 // Get the returned method. 3003 __ get_vm_result_2(R19_method); 3004 3005 __ bctr(); 3006 3007 3008 // Pending exception after the safepoint. 3009 __ BIND(pending); 3010 3011 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true); 3012 3013 // exception pending => remove activation and forward to exception handler 3014 3015 __ li(R11_scratch1, 0); 3016 __ ld(R3_ARG1, thread_(pending_exception)); 3017 __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread); 3018 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3019 3020 // ------------- 3021 // Make sure all code is generated. 3022 masm->flush(); 3023 3024 // return the blob 3025 // frame_size_words or bytes?? 3026 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize, 3027 oop_maps, true); 3028 } 3029 3030 3031 //------------------------------Montgomery multiplication------------------------ 3032 // 3033 3034 // Subtract 0:b from carry:a. Return carry. 
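// Portable reference for the inline-assembly loop below (illustration only,
// not used): subtract the multi-word value 0:b from carry:a in place and
// return the resulting top word.
#if 0
static unsigned long sub_reference(unsigned long a[], unsigned long b[],
                                   unsigned long carry, long len) {
  unsigned long borrow = 0;
  for (long i = 0; i < len; i++) {
    unsigned long ai = a[i];
    a[i] = ai - b[i] - borrow;
    // A borrow propagates when the minuend was too small.
    borrow = (ai < b[i] || (borrow != 0 && ai == b[i])) ? 1 : 0;
  }
  return carry - borrow;   // matches the final "addme" (carry + CA - 1)
}
#endif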
3035 static unsigned long 3036 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) { 3037 long i = 0; 3038 unsigned long tmp, tmp2; 3039 __asm__ __volatile__ ( 3040 "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA 3041 "mtctr %[len] \n" 3042 "0: \n" 3043 "ldx %[tmp], %[i], %[a] \n" 3044 "ldx %[tmp2], %[i], %[b] \n" 3045 "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended 3046 "stdx %[tmp], %[i], %[a] \n" 3047 "addi %[i], %[i], 8 \n" 3048 "bdnz 0b \n" 3049 "addme %[tmp], %[carry] \n" // carry + CA - 1 3050 : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2) 3051 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len) 3052 : "ctr", "xer", "memory" 3053 ); 3054 return tmp; 3055 } 3056 3057 // Multiply (unsigned) Long A by Long B, accumulating the double- 3058 // length result into the accumulator formed of T0, T1, and T2. 3059 inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3060 unsigned long hi, lo; 3061 __asm__ __volatile__ ( 3062 "mulld %[lo], %[A], %[B] \n" 3063 "mulhdu %[hi], %[A], %[B] \n" 3064 "addc %[T0], %[T0], %[lo] \n" 3065 "adde %[T1], %[T1], %[hi] \n" 3066 "addze %[T2], %[T2] \n" 3067 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3068 : [A]"r"(A), [B]"r"(B) 3069 : "xer" 3070 ); 3071 } 3072 3073 // As above, but add twice the double-length result into the 3074 // accumulator. 3075 inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3076 unsigned long hi, lo; 3077 __asm__ __volatile__ ( 3078 "mulld %[lo], %[A], %[B] \n" 3079 "mulhdu %[hi], %[A], %[B] \n" 3080 "addc %[T0], %[T0], %[lo] \n" 3081 "adde %[T1], %[T1], %[hi] \n" 3082 "addze %[T2], %[T2] \n" 3083 "addc %[T0], %[T0], %[lo] \n" 3084 "adde %[T1], %[T1], %[hi] \n" 3085 "addze %[T2], %[T2] \n" 3086 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3087 : [A]"r"(A), [B]"r"(B) 3088 : "xer" 3089 ); 3090 } 3091 3092 // Fast Montgomery multiplication. The derivation of the algorithm is 3093 // in "A Cryptographic Library for the Motorola DSP56000, 3094 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237". 3095 static void 3096 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[], 3097 unsigned long m[], unsigned long inv, int len) { 3098 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 3099 int i; 3100 3101 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 3102 3103 for (i = 0; i < len; i++) { 3104 int j; 3105 for (j = 0; j < i; j++) { 3106 MACC(a[j], b[i-j], t0, t1, t2); 3107 MACC(m[j], n[i-j], t0, t1, t2); 3108 } 3109 MACC(a[i], b[0], t0, t1, t2); 3110 m[i] = t0 * inv; 3111 MACC(m[i], n[0], t0, t1, t2); 3112 3113 assert(t0 == 0, "broken Montgomery multiply"); 3114 3115 t0 = t1; t1 = t2; t2 = 0; 3116 } 3117 3118 for (i = len; i < 2*len; i++) { 3119 int j; 3120 for (j = i-len+1; j < len; j++) { 3121 MACC(a[j], b[i-j], t0, t1, t2); 3122 MACC(m[j], n[i-j], t0, t1, t2); 3123 } 3124 m[i-len] = t0; 3125 t0 = t1; t1 = t2; t2 = 0; 3126 } 3127 3128 while (t0) { 3129 t0 = sub(m, n, t0, len); 3130 } 3131 } 3132 3133 // Fast Montgomery squaring. This uses asymptotically 25% fewer 3134 // multiplies so it should be up to 25% faster than Montgomery 3135 // multiplication. However, its loop control is more complex and it 3136 // may actually run slower on some machines. 
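// For reference, MACC/MACC2 above accumulate the 128-bit product A*B (once,
// respectively twice) into the 192-bit accumulator (T2:T1:T0). With the
// GCC/Clang 128-bit integer extension the single-add variant can be written
// portably as below (illustration only, not used by this file):
#if 0
static inline void MACC_reference(unsigned long A, unsigned long B,
                                  unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned __int128 sum = (unsigned __int128)A * B + T0;  // cannot overflow 128 bits
  T0 = (unsigned long)sum;                                // low 64 bits
  unsigned __int128 mid = (sum >> 64) + T1;               // carry into T1
  T1 = (unsigned long)mid;
  T2 += (unsigned long)(mid >> 64);                       // final carry into T2
}
#endif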
// Fast Montgomery squaring. This uses asymptotically 25% fewer
// multiplies, so it should be up to 25% faster than Montgomery
// multiplication. However, its loop control is more complex and it
// may actually run slower on some machines.
static void
montgomery_square(unsigned long a[], unsigned long n[],
                  unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery square");

  for (i = 0; i < len; i++) {
    int j;
    int end = (i+1)/2;
    for (j = 0; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < i; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery square");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int start = i-len+1;
    int end = start + (len - start)/2;
    int j;
    for (j = start; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < len; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}

// The threshold at which squaring is advantageous was determined
// experimentally on an i7-3930K (Sandy Bridge-E) CPU @ 3.5GHz. It does
// not seem to matter much on Power8, so we use the same value.
#define MONTGOMERY_SQUARING_THRESHOLD 64

// Copy len longwords from s to d, word-swapping as we go. The
// destination array is reversed.
static void reverse_words(unsigned long *s, unsigned long *d, int len) {
  d += len;
  while (len-- > 0) {
    d--;
    unsigned long s_val = *s;
    // Swap words in a longword on little endian machines.
#ifdef VM_LITTLE_ENDIAN
    s_val = (s_val << 32) | (s_val >> 32);
#endif
    *d = s_val;
    s++;
  }
}

void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                        jint len, jlong inv,
                                        jint *m_ints) {
  len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
  assert(len % 2 == 0, "array length in montgomery_multiply must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and will
  // use a total of 8K bytes of stack space here.
  int divisor = sizeof(unsigned long) * 4;
  guarantee(longwords <= 8192 / divisor, "must be");
  int total_allocation = longwords * sizeof(unsigned long) * 4;
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *b = scratch + 1 * longwords,
    *n = scratch + 2 * longwords,
    *m = scratch + 3 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)b_ints, b, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);

  reverse_words(m, (unsigned long *)m_ints, longwords);
}
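// Illustrative only: in terms of the multi-precision values A, B and N encoded
// in the jint arrays (assuming the java.math.BigInteger convention of the most
// significant int first), the wrapper above writes to m_ints a value congruent
// to A * B * R^-1 (mod N), where R = 2^(64 * longwords). A hypothetical
// caller-side sketch of moving a value into and back out of the Montgomery
// domain with this primitive (array names are made up; r_squared would hold
// R^2 mod N and one would hold the value 1):
//
//   montgomery_multiply(a, r_squared, n, len, inv, a_mont); // a_mont ~ A * R (mod N)
//   montgomery_multiply(a_mont, one, n, len, inv, a_back);  // a_back ~ A     (mod N)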
void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
                                      jint len, jlong inv,
                                      jint *m_ints) {
  len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
  assert(len % 2 == 0, "array length in montgomery_square must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and will
  // use a total of 6K bytes of stack space here.
  int divisor = sizeof(unsigned long) * 3;
  guarantee(longwords <= (8192 / divisor), "must be");
  int total_allocation = longwords * sizeof(unsigned long) * 3;
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *n = scratch + 1 * longwords,
    *m = scratch + 2 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
    ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
  } else {
    ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
  }

  reverse_words(m, (unsigned long *)m_ints, longwords);
}

#ifdef COMPILER2
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
                                                int shadow_space_bytes,
                                                const GrowableArray<VMReg>& input_registers,
                                                const GrowableArray<VMReg>& output_registers) {
  Unimplemented();
  return nullptr;
}
#endif