/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "frame_ppc.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/ad.hpp"
#include "opto/runtime.hpp"
#endif

#include <alloca.h>

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")


class RegisterSaver {
 // Used for saving volatile registers.
 public:

  // Support different return pc locations.
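  // The return pc may still be in LR, already stored in the caller's frame
  // (pre-saved), or stashed in JavaThread::_saved_exception_pc.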
  enum ReturnPCLocation {
    return_pc_is_lr,
    return_pc_is_pre_saved,
    return_pc_is_thread_saved_exception_pc
  };

  static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                             int* out_frame_size_in_bytes,
                                                             bool generate_oop_map,
                                                             int return_pc_adjustment,
                                                             ReturnPCLocation return_pc_location,
                                                             bool save_vectors = false);
  static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                   int frame_size_in_bytes,
                                                   bool restore_ctr,
                                                   bool save_vectors = false);

  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
                                                     Register r_temp,
                                                     int frame_size,
                                                     int total_args,
                                                     const VMRegPair *regs, const VMRegPair *regs2 = nullptr);
  static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
                                                       int frame_size,
                                                       int total_args,
                                                       const VMRegPair *regs, const VMRegPair *regs2 = nullptr);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);

  // Constants and data structures:

  typedef enum {
    int_reg,
    float_reg,
    special_reg,
    vs_reg
  } RegisterType;

  typedef enum {
    reg_size      = 8,
    half_reg_size = reg_size / 2,
    vs_reg_size   = 16
  } RegisterConstants;

  typedef struct {
    RegisterType reg_type;
    int          reg_num;
    VMReg        vmreg;
  } LiveRegType;
};


#define RegisterSaver_LiveIntReg(regname) \
  { RegisterSaver::int_reg,     regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveFloatReg(regname) \
  { RegisterSaver::float_reg,   regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveSpecialReg(regname) \
  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveVSReg(regname) \
  { RegisterSaver::vs_reg,      regname->encoding(), regname->as_VMReg() }

static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
  // Live registers which get spilled to the stack. Register
  // positions in this array correspond directly to the stack layout.
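  // (From the bottom of the save area upwards: CTR, F0-F31, R0, R2-R12,
  //  R14-R31, optionally followed by VSR32-VSR51 at the top of the frame.
  //  R1 (stack pointer) and R13 (system thread id) are deliberately excluded.)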

  //
  // live special registers:
  //
  RegisterSaver_LiveSpecialReg(SR_CTR),
  //
  // live float registers:
  //
  RegisterSaver_LiveFloatReg( F0  ),
  RegisterSaver_LiveFloatReg( F1  ),
  RegisterSaver_LiveFloatReg( F2  ),
  RegisterSaver_LiveFloatReg( F3  ),
  RegisterSaver_LiveFloatReg( F4  ),
  RegisterSaver_LiveFloatReg( F5  ),
  RegisterSaver_LiveFloatReg( F6  ),
  RegisterSaver_LiveFloatReg( F7  ),
  RegisterSaver_LiveFloatReg( F8  ),
  RegisterSaver_LiveFloatReg( F9  ),
  RegisterSaver_LiveFloatReg( F10 ),
  RegisterSaver_LiveFloatReg( F11 ),
  RegisterSaver_LiveFloatReg( F12 ),
  RegisterSaver_LiveFloatReg( F13 ),
  RegisterSaver_LiveFloatReg( F14 ),
  RegisterSaver_LiveFloatReg( F15 ),
  RegisterSaver_LiveFloatReg( F16 ),
  RegisterSaver_LiveFloatReg( F17 ),
  RegisterSaver_LiveFloatReg( F18 ),
  RegisterSaver_LiveFloatReg( F19 ),
  RegisterSaver_LiveFloatReg( F20 ),
  RegisterSaver_LiveFloatReg( F21 ),
  RegisterSaver_LiveFloatReg( F22 ),
  RegisterSaver_LiveFloatReg( F23 ),
  RegisterSaver_LiveFloatReg( F24 ),
  RegisterSaver_LiveFloatReg( F25 ),
  RegisterSaver_LiveFloatReg( F26 ),
  RegisterSaver_LiveFloatReg( F27 ),
  RegisterSaver_LiveFloatReg( F28 ),
  RegisterSaver_LiveFloatReg( F29 ),
  RegisterSaver_LiveFloatReg( F30 ),
  RegisterSaver_LiveFloatReg( F31 ),
  //
  // live integer registers:
  //
  RegisterSaver_LiveIntReg(   R0  ),
  //RegisterSaver_LiveIntReg( R1  ), // stack pointer
  RegisterSaver_LiveIntReg(   R2  ),
  RegisterSaver_LiveIntReg(   R3  ),
  RegisterSaver_LiveIntReg(   R4  ),
  RegisterSaver_LiveIntReg(   R5  ),
  RegisterSaver_LiveIntReg(   R6  ),
  RegisterSaver_LiveIntReg(   R7  ),
  RegisterSaver_LiveIntReg(   R8  ),
  RegisterSaver_LiveIntReg(   R9  ),
  RegisterSaver_LiveIntReg(   R10 ),
  RegisterSaver_LiveIntReg(   R11 ),
  RegisterSaver_LiveIntReg(   R12 ),
  //RegisterSaver_LiveIntReg( R13 ), // system thread id
  RegisterSaver_LiveIntReg(   R14 ),
  RegisterSaver_LiveIntReg(   R15 ),
  RegisterSaver_LiveIntReg(   R16 ),
  RegisterSaver_LiveIntReg(   R17 ),
  RegisterSaver_LiveIntReg(   R18 ),
  RegisterSaver_LiveIntReg(   R19 ),
  RegisterSaver_LiveIntReg(   R20 ),
  RegisterSaver_LiveIntReg(   R21 ),
  RegisterSaver_LiveIntReg(   R22 ),
  RegisterSaver_LiveIntReg(   R23 ),
  RegisterSaver_LiveIntReg(   R24 ),
  RegisterSaver_LiveIntReg(   R25 ),
  RegisterSaver_LiveIntReg(   R26 ),
  RegisterSaver_LiveIntReg(   R27 ),
  RegisterSaver_LiveIntReg(   R28 ),
  RegisterSaver_LiveIntReg(   R29 ),
  RegisterSaver_LiveIntReg(   R30 ),
  RegisterSaver_LiveIntReg(   R31 )  // must be the last register (see save/restore functions below)
};

static const RegisterSaver::LiveRegType RegisterSaver_LiveVSRegs[] = {
  //
  // live vector scalar registers (optional, only these ones are used by C2):
  //
  RegisterSaver_LiveVSReg( VSR32 ),
  RegisterSaver_LiveVSReg( VSR33 ),
  RegisterSaver_LiveVSReg( VSR34 ),
  RegisterSaver_LiveVSReg( VSR35 ),
  RegisterSaver_LiveVSReg( VSR36 ),
  RegisterSaver_LiveVSReg( VSR37 ),
  RegisterSaver_LiveVSReg( VSR38 ),
  RegisterSaver_LiveVSReg( VSR39 ),
  RegisterSaver_LiveVSReg( VSR40 ),
  RegisterSaver_LiveVSReg( VSR41 ),
  RegisterSaver_LiveVSReg( VSR42 ),
  RegisterSaver_LiveVSReg( VSR43 ),
  RegisterSaver_LiveVSReg( VSR44 ),
  RegisterSaver_LiveVSReg( VSR45 ),
  RegisterSaver_LiveVSReg( VSR46 ),
  RegisterSaver_LiveVSReg( VSR47 ),
  RegisterSaver_LiveVSReg( VSR48 ),
  RegisterSaver_LiveVSReg( VSR49 ),
  RegisterSaver_LiveVSReg( VSR50 ),
  RegisterSaver_LiveVSReg( VSR51 )
};


OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                                   int* out_frame_size_in_bytes,
                                                                   bool generate_oop_map,
                                                                   int return_pc_adjustment,
                                                                   ReturnPCLocation return_pc_location,
                                                                   bool save_vectors) {
  // Push an abi_reg_args-frame and store all registers which may be live.
  // If requested, create an OopMap: Record volatile registers as
  // callee-save values in an OopMap so their save locations will be
  // propagated to the RegisterMap of the caller frame during
  // StackFrameStream construction (needed for deoptimization; see
  // compiledVFrame::create_stack_value).
  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
  // Updated return pc is returned in R31 (if not return_pc_is_pre_saved).

  // calculate frame size
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int vsregstosave_num     = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) /
                                                   sizeof(RegisterSaver::LiveRegType))
                                                : 0;
  const int register_save_size   = regstosave_num * reg_size + vsregstosave_num * vs_reg_size;
  const int frame_size_in_bytes  = align_up(register_save_size, frame::alignment_in_bytes)
                                   + frame::native_abi_reg_args_size;

  *out_frame_size_in_bytes       = frame_size_in_bytes;
  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : nullptr;

  BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");

  // push a new frame
  __ push_frame(frame_size_in_bytes, noreg);

  // Save some registers in the last (non-vector) slots of the new frame so we
  // can use them as scratch regs or to determine the return pc.
  __ std(R31, frame_size_in_bytes -   reg_size - vsregstosave_num * vs_reg_size, R1_SP);
  __ std(R30, frame_size_in_bytes - 2*reg_size - vsregstosave_num * vs_reg_size, R1_SP);

  // save the flags
  // Do the save_LR_CR by hand and adjust the return pc if requested.
  __ mfcr(R30);
  __ std(R30, frame_size_in_bytes + _abi0(cr), R1_SP);
  switch (return_pc_location) {
    case return_pc_is_lr:        __ mflr(R31); break;
    case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break;
    case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
    default: ShouldNotReachHere();
  }
  if (return_pc_location != return_pc_is_pre_saved) {
    if (return_pc_adjustment != 0) {
      __ addi(R31, R31, return_pc_adjustment);
    }
    __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
  }

  // save all registers (ints and floats)
  int offset = register_save_offset;

  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num < 30) { // We spilled R30-31 right at the beginning.
          __ std(as_Register(reg_num), offset, R1_SP);
        }
        break;
      }
      case RegisterSaver::float_reg: {
        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR.encoding()) {
          __ mfctr(R30);
          __ std(R30, offset, R1_SP);
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
                            RegisterSaver_LiveRegs[i].vmreg);
      map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size) >> 2),
                            RegisterSaver_LiveRegs[i].vmreg->next());
    }
    offset += reg_size;
  }

  for (int i = 0; i < vsregstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveVSRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveVSRegs[i].reg_type;

    __ li(R30, offset);
    __ stxvd2x(as_VectorSRegister(reg_num), R30, R1_SP);

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
                            RegisterSaver_LiveVSRegs[i].vmreg);
    }
    offset += vs_reg_size;
  }

  assert(offset == frame_size_in_bytes, "consistency check");

  BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");

  // And we're done.
  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                         int frame_size_in_bytes,
                                                         bool restore_ctr,
                                                         bool save_vectors) {
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int vsregstosave_num     = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) /
                                                   sizeof(RegisterSaver::LiveRegType))
                                                : 0;
  const int register_save_size   = regstosave_num * reg_size + vsregstosave_num * vs_reg_size;

  const int register_save_offset = frame_size_in_bytes - register_save_size;

  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");

  // restore all registers (ints and floats)
  int offset = register_save_offset;

  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR.encoding()) {
          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
            __ ld(R31, offset, R1_SP);
            __ mtctr(R31);
          }
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  for (int i = 0; i < vsregstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveVSRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveVSRegs[i].reg_type;

    __ li(R31, offset);
    __ lxvd2x(as_VectorSRegister(reg_num), R31, R1_SP);

    offset += vs_reg_size;
  }

  assert(offset == frame_size_in_bytes, "consistency check");

  // restore link and the flags
  __ ld(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
  __ mtlr(R31);

  __ ld(R31, frame_size_in_bytes + _abi0(cr), R1_SP);
  __ mtcr(R31);

  // restore scratch register's value
  __ ld(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP);

  // pop the frame
  __ addi(R1_SP, R1_SP, frame_size_in_bytes);

  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}

void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
                                                           int frame_size, int total_args, const VMRegPair *regs,
                                                           const VMRegPair *regs2) {
  __ push_frame(frame_size, r_temp);
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ std(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ stfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != nullptr) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (!r_1->is_valid()) {
        assert(!r_2->is_valid(), "");
        continue;
      }
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
}

void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
                                                             int total_args, const VMRegPair *regs,
                                                             const VMRegPair *regs2) {
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ ld(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ lfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != nullptr)
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ ld(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ lfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  __ pop_frame();
}

// Restore the registers that might be holding a result.
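// Only the slots of R3_RET (integer/oop result) and F1_RET (float/double
// result) are reloaded; all other saved values are ignored.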
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size; // VS registers not relevant here.
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // restore all result registers (ints and floats)
  int offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (as_Register(reg_num) == R3_RET) // int result_reg
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        if (as_FloatRegister(reg_num) == F1_RET) // float result_reg
          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        // Special registers don't hold a result.
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  assert(offset == frame_size_in_bytes, "consistency check");
}

// Is the vector's size (in bytes) bigger than the size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8/16 on PPC64.
  assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
  return size > 8;
}

static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}

static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher. Register values below
// VMRegImpl::stack0 (up to Register::number_of_registers) denote the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
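// For example, for arguments (int, long, double) the assignment below yields
// int -> R3, long -> R4, double -> F1: ints/longs consume the 8 GP argument
// registers while floats/doubles draw from the 13 FP argument registers
// independently.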

const VMReg java_iarg_reg[8] = {
  R3->as_VMReg(),
  R4->as_VMReg(),
  R5->as_VMReg(),
  R6->as_VMReg(),
  R7->as_VMReg(),
  R8->as_VMReg(),
  R9->as_VMReg(),
  R10->as_VMReg()
};

const VMReg java_farg_reg[13] = {
  F1->as_VMReg(),
  F2->as_VMReg(),
  F3->as_VMReg(),
  F4->as_VMReg(),
  F5->as_VMReg(),
  F6->as_VMReg(),
  F7->as_VMReg(),
  F8->as_VMReg(),
  F9->as_VMReg(),
  F10->as_VMReg(),
  F11->as_VMReg(),
  F12->as_VMReg(),
  F13->as_VMReg()
};

const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);

STATIC_ASSERT(num_java_iarg_registers == Argument::n_int_register_parameters_j);
STATIC_ASSERT(num_java_farg_registers == Argument::n_float_register_parameters_j);

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {
  // C2c calling conventions for compiled-compiled calls.
  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
  // registers _AND_ put the rest on the stack.

  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  VMReg reg;
  int stk  = 0;
  int ireg = 0;
  int freg = 0;

  // We put the first 8 arguments into registers and the rest on the
  // stack. Float arguments are already in their argument registers
  // due to c2c calling conventions (see calling_convention).
  for (int i = 0; i < total_args_passed; ++i) {
    switch(sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (ireg < num_java_iarg_registers) {
        // Put int/ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put int/ptr on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (ireg < num_java_iarg_registers) {
        // Put long in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put long on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (ireg < num_java_iarg_registers) {
        // Put ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put ptr on stack. Objects must be aligned to 2 slots too,
        // because "64-bit pointers record oop-ishness on 2 aligned
        // adjacent registers." (see OopFlow::build_oop_map).
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_FLOAT:
      if (freg < num_java_farg_registers) {
        // Put float in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < num_java_farg_registers) {
        // Put double in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
    }
  }
  return stk;
}

#if defined(COMPILER1) || defined(COMPILER2)
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {
  // Calling conventions for C runtime calls and calls to JNI native methods.
  //
  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
  // the first 13 flt/dbl's in the first 13 fp regs but additionally
  // copy flt/dbl to the stack if they are beyond the 8th argument.

  const VMReg iarg_reg[8] = {
    R3->as_VMReg(),
    R4->as_VMReg(),
    R5->as_VMReg(),
    R6->as_VMReg(),
    R7->as_VMReg(),
    R8->as_VMReg(),
    R9->as_VMReg(),
    R10->as_VMReg()
  };

  const VMReg farg_reg[13] = {
    F1->as_VMReg(),
    F2->as_VMReg(),
    F3->as_VMReg(),
    F4->as_VMReg(),
    F5->as_VMReg(),
    F6->as_VMReg(),
    F7->as_VMReg(),
    F8->as_VMReg(),
    F9->as_VMReg(),
    F10->as_VMReg(),
    F11->as_VMReg(),
    F12->as_VMReg(),
    F13->as_VMReg()
  };

  // Check calling conventions consistency.
  assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
         sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
         "consistency");

  const int additional_frame_header_slots = ((frame::native_abi_minframe_size - frame::jit_out_preserve_size)
                                             / VMRegImpl::stack_slot_size);
  const int float_offset_in_slots = Argument::float_on_stack_offset_in_bytes_c / VMRegImpl::stack_slot_size;

  VMReg reg;
  int arg  = 0;
  int freg = 0;
  bool stack_used = false;

  for (int i = 0; i < total_args_passed; ++i, ++arg) {
    // Each argument corresponds to a slot in the Parameter Save Area (if not omitted).
    int stk = (arg * 2) + additional_frame_header_slots;

    switch(sig_bt[i]) {
    //
    // If arguments 0-7 are integers, they are passed in integer registers.
    // Argument i is placed in iarg_reg[i].
    //
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      // We must cast ints to longs and use full 64 bit stack slots
      // here. Thus fall through, handle as long.
    case T_LONG:
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      // Oops are already boxed if required (JNI).
      if (arg < Argument::n_int_register_parameters_c) {
        reg = iarg_reg[arg];
      } else {
        reg = VMRegImpl::stack2reg(stk);
        stack_used = true;
      }
      regs[i].set2(reg);
      break;

    //
    // Floats are treated differently from int regs: The first 13 float arguments
    // are passed in registers (not the float args among the first 13 args).
    // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
    // in farg_reg[j] if argument i is the j-th float argument of this call.
    //
    case T_FLOAT:
      if (freg < Argument::n_float_register_parameters_c) {
        // Put float in register ...
        reg = farg_reg[freg];
        ++freg;
      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk + float_offset_in_slots);
        stack_used = true;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < Argument::n_float_register_parameters_c) {
        // Put double in register ...
        reg = farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack.
        reg = VMRegImpl::stack2reg(stk);
        stack_used = true;
      }
      regs[i].set2(reg);
      break;

    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // Return size of the stack frame excluding the jit_out_preserve part in single-word slots.
#if defined(ABI_ELFv2)
  assert(additional_frame_header_slots == 0, "ABIv2 shouldn't use extra slots");
  // ABIv2 allows omitting the Parameter Save Area if the callee's prototype
  // indicates that all parameters can be passed in registers.
  return stack_used ? (arg * 2) : 0;
#else
  // The Parameter Save Area needs to be at least 8 double-word slots for ABIv1.
  // We have to add extra slots because ABIv1 uses a larger header.
  return MAX2(arg, 8) * 2 + additional_frame_header_slots;
#endif
}
#endif // COMPILER1 || COMPILER2

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  Unimplemented();
  return 0;
}

static address gen_c2i_adapter(MacroAssembler *masm,
                               int total_args_passed,
                               int comp_args_on_stack,
                               const BasicType *sig_bt,
                               const VMRegPair *regs,
                               Label& call_interpreter,
                               const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           align_up(total_args_passed * wordSize, frame::alignment_in_bytes);

  // regular (verified) c2i entry point
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CCR0, call_interpreter);


  // Patch caller's callsite, method_(code) was not null which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi0(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi0(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);


  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
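  // (The interpreter expects its outgoing arguments at the top of the resized
  //  frame; st_off below walks downwards from adapter_size - wordSize.)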
  __ resize_frame(-adapter_size, R12_scratch2);

  int st_off = adapter_size - wordSize;

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      Register tmp_reg = value_regs[value_regs_index];
      value_regs_index = (value_regs_index + 1) % num_value_regs;
      // The calling convention produces OptoRegs that ignore the out
      // preserve area (JIT's ABI). We must account for it here.
      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lwz(tmp_reg, ld_off, sender_SP);
      } else {
        __ ld(tmp_reg, ld_off, sender_SP);
      }
      // Pretend stack targets were loaded into tmp_reg.
      r_1 = tmp_reg->as_VMReg();
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ stw(r, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // Longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
          st_off -= wordSize;
        }
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      FloatRegister f = r_1->as_FloatRegister();
      if (!r_2->is_valid()) {
        __ stfs(f, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        // One of these should get known junk...
        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
        st_off -= wordSize;
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }

  // Jump to the interpreter just as if interpreter was doing it.

  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);

  // load TOS
  __ addi(R15_esp, R1_SP, st_off);

  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
  __ bctr();

  return c2i_entrypoint;
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Load method's entry-point from method.
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do a i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

  const Register ld_ptr = R15_esp;
  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed * wordSize;

  // Cut-out for having no stack args. Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the value_regs temporaries.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset -= wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset - wordSize, ld_ptr);
        ld_offset -= 2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        }
      } else {
        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
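        // Skip the unused interpreter slot first so the 8-byte load below
        // hits the data word.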
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          ld_offset -= wordSize;
        }
        __ ld(r, ld_offset, ld_ptr);
        ld_offset -= wordSize;
      }

      if (r_1->is_stack()) {
        // Now store value where the compiler expects it
        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;

        if (sig_bt[i] == T_INT || sig_bt[i] == T_FLOAT || sig_bt[i] == T_BOOLEAN ||
            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR || sig_bt[i] == T_BYTE) {
          __ stw(r, st_off, R1_SP);
        } else {
          __ std(r, st_off, R1_SP);
        }
      }
    }
  }

  __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about.

  BLOCK_COMMENT("Store method");
  // Store method into thread->callee_target.
  // We might end up in handle_wrong_method if the callee is
  // deoptimized as we race thru here. If that happens we don't want
  // to take a safepoint because the caller frame will look
  // interpreted and arguments are now "compiled" so it is much better
  // to make this transition invisible to the stack walking
  // code. Unfortunately if we try and find the callee by normal means
  // a safepoint is possible. So we stash the desired callee in the
  // thread and the vm will find it there should this case occur.
  __ std(R19_method, thread_(callee_target));

  // Jump to the compiled code just as if compiled code was doing it.
  __ bctr();
}

AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry;
  address c2i_unverified_entry;
  address c2i_entry;


  // entry: i2c

  __ align(CodeEntryAlignment);
  i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // entry: c2i unverified

  __ align(CodeEntryAlignment);
  BLOCK_COMMENT("c2i unverified entry");
  c2i_unverified_entry = __ pc();

  // inline_cache contains a CompiledICData
  const Register ic             = R19_inline_cache_reg;
  const Register ic_klass       = R11_scratch1;
  const Register receiver_klass = R12_scratch2;
  const Register code           = R21_tmp1;
  const Register ientry         = R23_tmp3;

  assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
  assert(R11_scratch1 == R11, "need prologue scratch register");

  Label call_interpreter;

  __ ic_check(4 /* end_alignment */);
  __ ld(R19_method, CompiledICData::speculated_method_offset(), ic);
  // Argument is valid and klass is as expected, continue.

  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq_predict_taken(CCR0, call_interpreter);

  // Branch to ic_miss_stub.
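  // (Compiled code exists for the method: take the IC miss path so the call
  //  site gets re-resolved and subsequent calls reach the compiled entry.)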
  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);

  // entry: c2i

  c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = nullptr;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
      __ andi_(R0, R0, JVM_ACC_STATIC);
      __ beq(CCR0, L_skip_barrier); // non-static
    }

    Register klass = R11_scratch1;
    __ load_method_holder(klass, R19_method);
    __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);

    __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
    __ mtctr(klass);
    __ bctr();

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
                                          c2i_no_clinit_check_entry);
}

// An oop arg. Must pass a handle not the oop itself.
static void object_move(MacroAssembler* masm,
                        int frame_size_in_slots,
                        OopMap* oop_map, int oop_handle_offset,
                        bool is_receiver, int* receiver_offset,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
  assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
         "receiver has already been moved");

  // We must pass a handle. First figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // stack to stack or reg

    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    Label skip;
    const int oop_slot_in_callers_frame = reg2slot(src.first());

    guarantee(!is_receiver, "expecting receiver in register");
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));

    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
    __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
    __ cmpdi(CCR0, r_temp_2, 0);
    __ bne(CCR0, skip);
    // Use a null handle if oop is null.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // stack to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      // Nothing to do, r_handle is already the dst register.
    }
  } else {
    // reg to stack or reg
    const Register r_oop = src.first()->as_Register();
    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    const int oop_slot = (r_oop->encoding() - R3_ARG1->encoding()) * VMRegImpl::slots_per_word
                         + oop_handle_offset; // in slots
    const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
    Label skip;

    if (is_receiver) {
      *receiver_offset = oop_offset;
    }
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));

    __ std( r_oop,    oop_offset, R1_SP);
    __ addi(r_handle, R1_SP,      oop_offset);

    __ cmpdi(CCR0, r_oop, 0);
    __ bne(CCR0, skip);
    // Use a null handle if oop is null.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // reg to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // reg to reg
      // Nothing to do, r_handle is already the dst register.
    }
  }
}

static void int_move(MacroAssembler* masm,
                     VMRegPair src, VMRegPair dst,
                     Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid(), "incoming must be int");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ extsw(r_temp, src.first()->as_Register());
    __ std(r_temp, reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    __ extsw(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void long_move(MacroAssembler* masm,
                      VMRegPair src, VMRegPair dst,
                      Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_Register() != src.first()->as_Register())
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void float_move(MacroAssembler* masm,
                       VMRegPair src, VMRegPair dst,
                       Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
  assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
      __ stw(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

static void double_move(MacroAssembler* masm,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = R19_method; // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (is_reference_type(sig_bt[i])) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, reg2offset(r), R1_SP);
          __ verify_oop(temp_reg, FILE_AND_LINE);
        } else {
          __ verify_oop(r->as_Register(), FILE_AND_LINE);
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
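  // Compiled entry for signature-polymorphic intrinsics (invokeBasic,
  // linkTo*, linkToNative): no adapter frame is built; we just make sure the
  // receiver and/or trailing MemberName argument are in registers and jump
  // straight to the real target.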
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
    member_reg = R19_method;                           // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1; // trailing NativeEntryPoint argument
    member_reg = R19_method;                           // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note: This assumes that compiled calling conventions always
      // pass the receiver oop in a register. If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = R11_scratch1; // TODO (hs24): is R11_scratch1 really free at this point?
      __ ld(receiver_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

//---------------------------- continuation_enter_setup ---------------------------
//
// Frame setup.
//
// Arguments:
//   None.
//
// Results:
//   R1_SP: pointer to blank ContinuationEntry in the pushed frame.
//
// Kills:
//   R0, R20
//
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& framesize_words) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  const int frame_size_in_bytes = (int)ContinuationEntry::size();
  assert(is_aligned(frame_size_in_bytes, frame::alignment_in_bytes), "alignment error");

  framesize_words = frame_size_in_bytes / wordSize;

  DEBUG_ONLY(__ block_comment("setup {"));
  // Save return pc and push entry frame
  const Register return_pc = R20;
  __ mflr(return_pc);
  __ std(return_pc, _abi0(lr), R1_SP);    // SP->lr = return_pc
  __ push_frame(frame_size_in_bytes, R0); // SP -= frame_size_in_bytes

  OopMap* map = new OopMap((int)frame_size_in_bytes / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);

  __ ld_ptr(R0, JavaThread::cont_entry_offset(), R16_thread);
  __ st_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
  __ st_ptr(R0, ContinuationEntry::parent_offset(), R1_SP);
  DEBUG_ONLY(__ block_comment("} setup"));

  return map;
}

//---------------------------- fill_continuation_entry ---------------------------
//
// Initialize the new ContinuationEntry.
//
// Arguments:
//   R1_SP: pointer to blank ContinuationEntry
//   reg_cont_obj: pointer to the continuation
//   reg_flags: flags
//
// Results:
//   R1_SP: pointer to filled out ContinuationEntry
//
// Kills:
//   R8_ARG6, R9_ARG7, R10_ARG8
//
static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, Register reg_flags) {
  assert_different_registers(reg_cont_obj, reg_flags);
  Register zero = R8_ARG6;
  Register tmp2 = R9_ARG7;
  Register tmp3 = R10_ARG8;

  DEBUG_ONLY(__ block_comment("fill {"));
#ifdef ASSERT
  __ load_const_optimized(tmp2, ContinuationEntry::cookie_value());
  __ stw(tmp2, in_bytes(ContinuationEntry::cookie_offset()), R1_SP);
#endif //ASSERT

  __ li(zero, 0);
  __ st_ptr(reg_cont_obj, ContinuationEntry::cont_offset(), R1_SP);
  __ stw(reg_flags, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
  __ st_ptr(zero, ContinuationEntry::chunk_offset(), R1_SP);
  __ stw(zero, in_bytes(ContinuationEntry::argsize_offset()), R1_SP);
  __ stw(zero, in_bytes(ContinuationEntry::pin_count_offset()), R1_SP);

  __ ld_ptr(tmp2, JavaThread::cont_fastpath_offset(), R16_thread);
  __ ld(tmp3, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
  __ st_ptr(tmp2, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
  __ std(tmp3, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP);

  __ st_ptr(zero, JavaThread::cont_fastpath_offset(), R16_thread);
  __ std(zero, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
  DEBUG_ONLY(__ block_comment("} fill"));
}

//---------------------------- continuation_enter_cleanup ---------------------------
//
// Copy corresponding attributes from the top ContinuationEntry to the JavaThread
// before deleting it.
//
// Arguments:
//   R1_SP: pointer to the ContinuationEntry
//
// Results:
//   None.
 1638 //
 1639 // Kills:
 1640 //   R8_ARG6, R9_ARG7, R10_ARG8, R15_esp
 1641 //
 1642 static void continuation_enter_cleanup(MacroAssembler* masm) {
 1643   Register tmp1 = R8_ARG6;
 1644   Register tmp2 = R9_ARG7;
 1645   Register tmp3 = R10_ARG8;
 1646
 1647 #ifdef ASSERT
 1648   __ block_comment("clean {");
 1649   __ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread);
 1650   __ cmpd(CCR0, R1_SP, tmp1);
 1651   __ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP");
 1652 #endif
 1653
 1654   __ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
 1655   __ st_ptr(tmp1, JavaThread::cont_fastpath_offset(), R16_thread);
 1656
 1657   if (CheckJNICalls) {
 1658     // Check if this is a virtual thread continuation
 1659     Label L_skip_vthread_code;
 1660     __ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
 1661     __ cmpwi(CCR0, R0, 0);
 1662     __ beq(CCR0, L_skip_vthread_code);
 1663
 1664     // If the JNI monitor count is > 0 and this vthread is terminating then
 1665     // it failed to release a JNI monitor. So we issue the same log message
 1666     // that JavaThread::exit does.
 1667     __ ld(R0, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
 1668     __ cmpdi(CCR0, R0, 0);
 1669     __ beq(CCR0, L_skip_vthread_code);
 1670
 1671     // Save return value potentially containing the exception oop
 1672     Register ex_oop = R15_esp; // nonvolatile register
 1673     __ mr(ex_oop, R3_RET);
 1674     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
 1675     // Restore potential return value
 1676     __ mr(R3_RET, ex_oop);
 1677
 1678     // For vthreads we have to explicitly zero the JNI monitor count of the carrier
 1679     // on termination. The held count is implicitly zeroed below when we restore from
 1680     // the parent held count (which has to be zero).
 1681     __ li(tmp1, 0);
 1682     __ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
 1683
 1684     __ bind(L_skip_vthread_code);
 1685   }
 1686 #ifdef ASSERT
 1687   else {
 1688     // Check if this is a virtual thread continuation
 1689     Label L_skip_vthread_code;
 1690     __ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
 1691     __ cmpwi(CCR0, R0, 0);
 1692     __ beq(CCR0, L_skip_vthread_code);
 1693
 1694     // See comment just above. If not checking JNI calls the JNI count is only
 1695     // needed for assertion checking.
 1696     __ li(tmp1, 0);
 1697     __ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
 1698
 1699     __ bind(L_skip_vthread_code);
 1700   }
 1701 #endif
 1702
 1703   __ ld(tmp2, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP);
 1704   __ ld_ptr(tmp3, ContinuationEntry::parent_offset(), R1_SP);
 1705   __ std(tmp2, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
 1706   __ st_ptr(tmp3, JavaThread::cont_entry_offset(), R16_thread);
 1707   DEBUG_ONLY(__ block_comment("} clean"));
 1708 }
 1709
 1710 static void check_continuation_enter_argument(VMReg actual_vmreg,
 1711                                               Register expected_reg,
 1712                                               const char* name) {
 1713   assert(!actual_vmreg->is_stack(), "%s cannot be on stack", name);
 1714   assert(actual_vmreg->as_Register() == expected_reg,
 1715          "%s is in unexpected register: %s instead of %s",
 1716          name, actual_vmreg->as_Register()->name(), expected_reg->name());
 1717 }
 1718
 1719 static void gen_continuation_enter(MacroAssembler* masm,
 1720                                    const VMRegPair* regs,
 1721                                    int& exception_offset,
 1722                                    OopMapSet* oop_maps,
 1723                                    int& frame_complete,
 1724                                    int& framesize_words,
 1725                                    int& interpreted_entry_offset,
 1726                                    int& compiled_entry_offset) {
 1727
 1728   // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
 1729   int pos_cont_obj   = 0;
 1730   int pos_is_cont    = 1;
 1731   int pos_is_virtual = 2;
 1732
 1733   // The platform-specific calling convention may present the arguments in various registers.
 1734   // To simplify the rest of the code, we expect the arguments to reside in these known
 1735   // registers, and we additionally check the placement here in case the calling convention
 1736   // ever changes.
 1737   Register reg_cont_obj   = R3_ARG1;
 1738   Register reg_is_cont    = R4_ARG2;
 1739   Register reg_is_virtual = R5_ARG3;
 1740
 1741   check_continuation_enter_argument(regs[pos_cont_obj].first(),   reg_cont_obj,   "Continuation object");
 1742   check_continuation_enter_argument(regs[pos_is_cont].first(),    reg_is_cont,    "isContinue");
 1743   check_continuation_enter_argument(regs[pos_is_virtual].first(), reg_is_virtual, "isVirtualThread");
 1744
 1745   address resolve_static_call = SharedRuntime::get_resolve_static_call_stub();
 1746
 1747   address start = __ pc();
 1748
 1749   Label L_thaw, L_exit;
 1750
 1751   // i2i entry used at interp_only_mode only
 1752   interpreted_entry_offset = __ pc() - start;
 1753   {
 1754 #ifdef ASSERT
 1755     Label is_interp_only;
 1756     __ lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
 1757     __ cmpwi(CCR0, R0, 0);
 1758     __ bne(CCR0, is_interp_only);
 1759     __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
 1760     __ bind(is_interp_only);
 1761 #endif
 1762
 1763     // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
 1764     __ ld(reg_cont_obj,    Interpreter::stackElementSize*3, R15_esp);
 1765     __ lwz(reg_is_cont,    Interpreter::stackElementSize*2, R15_esp);
 1766     __ lwz(reg_is_virtual, Interpreter::stackElementSize*1, R15_esp);
 1767
 1768     __ push_cont_fastpath();
 1769
 1770     OopMap* map = continuation_enter_setup(masm, framesize_words);
 1771
 1772     // The frame is complete here, but we only record it for the compiled entry, so the
 1773     // frame would appear unsafe. That's okay: at the very worst we miss an async sample,
 1774     // and we're in interp_only_mode anyway.
 1775     fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
 1776
 1777     // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue).
 1778     __ cmpwi(CCR0, reg_is_cont, 0);
 1779     __ bne(CCR0, L_thaw);
 1780
 1781     // --- call Continuation.enter(Continuation c, boolean isContinue)
 1782
 1783     // Emit compiled static call. The call will always be resolved to the c2i
 1784     // entry of Continuation.enter(Continuation c, boolean isContinue).
 1785     // There are special cases in SharedRuntime::resolve_static_call_C() and
 1786     // SharedRuntime::resolve_sub_helper_internal() to achieve this.
 1787     // See also the corresponding call below.
 1788     address c2i_call_pc = __ pc();
 1789     int start_offset = __ offset();
 1790     // Put the entry point as a constant into the constant pool.
 1791     const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none);
 1792     const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 1793     guarantee(entry_point_toc_addr != nullptr, "const section overflow");
 1794
 1795     // Emit the trampoline stub which will be related to the branch-and-link below.
 1796     address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset);
 1797     guarantee(stub != nullptr, "no space for trampoline stub");
 1798
 1799     __ relocate(relocInfo::static_call_type);
 1800     // Note: At this point we do not have the address of the trampoline
 1801     // stub, and the entry point might be too far away for bl, so __ pc()
 1802     // serves as dummy and the bl will be patched later.
 1803     __ bl(__ pc());
 1804     oop_maps->add_gc_map(__ pc() - start, map);
 1805     __ post_call_nop();
 1806
 1807     __ b(L_exit);
 1808
 1809     // static stub for the call above
 1810     stub = CompiledDirectCall::emit_to_interp_stub(masm, c2i_call_pc);
 1811     guarantee(stub != nullptr, "no space for static stub");
 1812   }
 1813
 1814   // compiled entry
 1815   __ align(CodeEntryAlignment);
 1816   compiled_entry_offset = __ pc() - start;
 1817
 1818   OopMap* map = continuation_enter_setup(masm, framesize_words);
 1819
 1820   // Frame is now completed as far as size and linkage.
 1821   frame_complete = __ pc() - start;
 1822
 1823   fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
 1824
 1825   // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue).
 1826   __ cmpwi(CCR0, reg_is_cont, 0);
 1827   __ bne(CCR0, L_thaw);
 1828
 1829   // --- call Continuation.enter(Continuation c, boolean isContinue)
 1830
 1831   // Emit compiled static call.
 1832   // The call needs to be resolved. There's a special case for this in
 1833   // SharedRuntime::find_callee_info_helper() which calls
 1834   // LinkResolver::resolve_continuation_enter() which resolves the call to
 1835   // Continuation.enter(Continuation c, boolean isContinue).
 1836   address call_pc = __ pc();
 1837   int start_offset = __ offset();
 1838   // Put the entry point as a constant into the constant pool.
 1839   const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none);
 1840   const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 1841   guarantee(entry_point_toc_addr != nullptr, "const section overflow");
 1842
 1843   // Emit the trampoline stub which will be related to the branch-and-link below.
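  // (Why a trampoline is needed, as background: bl carries a signed 26-bit
  // displacement and therefore reaches only +/-32 MB from the call site. If
  // the resolved destination ends up out of range, the bl is patched to
  // target the trampoline stub instead, which in effect does -- roughly,
  // not the exact emitted sequence:
  //   ld    R12, entry_point_toc_offset(toc)   // load full 64-bit target
  //   mtctr R12
  //   bctr                                     // dispatch through CTR
  // so any 64-bit address stored in the constant pool stays reachable.)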
1844 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset); 1845 guarantee(stub != nullptr, "no space for trampoline stub"); 1846 1847 __ relocate(relocInfo::static_call_type); 1848 // Note: At this point we do not have the address of the trampoline 1849 // stub, and the entry point might be too far away for bl, so __ pc() 1850 // serves as dummy and the bl will be patched later. 1851 __ bl(__ pc()); 1852 oop_maps->add_gc_map(__ pc() - start, map); 1853 __ post_call_nop(); 1854 1855 __ b(L_exit); 1856 1857 // --- Thawing path 1858 1859 __ bind(L_thaw); 1860 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(StubRoutines::cont_thaw())); 1861 __ mtctr(R0); 1862 __ bctrl(); 1863 oop_maps->add_gc_map(__ pc() - start, map->deep_copy()); 1864 ContinuationEntry::_return_pc_offset = __ pc() - start; 1865 __ post_call_nop(); 1866 1867 // --- Normal exit (resolve/thawing) 1868 1869 __ bind(L_exit); 1870 continuation_enter_cleanup(masm); 1871 1872 // Pop frame and return 1873 DEBUG_ONLY(__ ld_ptr(R0, 0, R1_SP)); 1874 __ addi(R1_SP, R1_SP, framesize_words*wordSize); 1875 DEBUG_ONLY(__ cmpd(CCR0, R0, R1_SP)); 1876 __ asm_assert_eq(FILE_AND_LINE ": inconsistent frame size"); 1877 __ ld(R0, _abi0(lr), R1_SP); // Return pc 1878 __ mtlr(R0); 1879 __ blr(); 1880 1881 // --- Exception handling path 1882 1883 exception_offset = __ pc() - start; 1884 1885 continuation_enter_cleanup(masm); 1886 Register ex_pc = R17_tos; // nonvolatile register 1887 Register ex_oop = R15_esp; // nonvolatile register 1888 __ ld(ex_pc, _abi0(callers_sp), R1_SP); // Load caller's return pc 1889 __ ld(ex_pc, _abi0(lr), ex_pc); 1890 __ mr(ex_oop, R3_RET); // save return value containing the exception oop 1891 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, ex_pc); 1892 __ mtlr(R3_RET); // the exception handler 1893 __ ld(R1_SP, _abi0(callers_sp), R1_SP); // remove enterSpecial frame 1894 1895 // Continue at exception handler 1896 // See OptoRuntime::generate_exception_blob for register arguments 1897 __ mr(R3_ARG1, ex_oop); // pass exception oop 1898 __ mr(R4_ARG2, ex_pc); // pass exception pc 1899 __ blr(); 1900 1901 // static stub for the call above 1902 stub = CompiledDirectCall::emit_to_interp_stub(masm, call_pc); 1903 guarantee(stub != nullptr, "no space for static stub"); 1904 } 1905 1906 static void gen_continuation_yield(MacroAssembler* masm, 1907 const VMRegPair* regs, 1908 OopMapSet* oop_maps, 1909 int& frame_complete, 1910 int& framesize_words, 1911 int& compiled_entry_offset) { 1912 Register tmp = R10_ARG8; 1913 1914 const int framesize_bytes = (int)align_up((int)frame::native_abi_reg_args_size, frame::alignment_in_bytes); 1915 framesize_words = framesize_bytes / wordSize; 1916 1917 address start = __ pc(); 1918 compiled_entry_offset = __ pc() - start; 1919 1920 // Save return pc and push entry frame 1921 __ mflr(tmp); 1922 __ std(tmp, _abi0(lr), R1_SP); // SP->lr = return_pc 1923 __ push_frame(framesize_bytes , R0); // SP -= frame_size_in_bytes 1924 1925 DEBUG_ONLY(__ block_comment("Frame Complete")); 1926 frame_complete = __ pc() - start; 1927 address last_java_pc = __ pc(); 1928 1929 // This nop must be exactly at the PC we push into the frame info. 1930 // We use this nop for fast CodeBlob lookup, associate the OopMap 1931 // with it right away. 
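  // (The pc recorded in the frame anchor below, the address of this nop, and
  // the offset registered in the OopMapSet must all coincide: stack walking
  // resolves the CodeBlob and its oopmap from that pc, so a mismatch of even
  // one instruction would break GC and continuation freezing at this point.)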
 1932   __ post_call_nop();
 1933   OopMap* map = new OopMap(framesize_bytes / VMRegImpl::stack_slot_size, 1);
 1934   oop_maps->add_gc_map(last_java_pc - start, map);
 1935
 1936   __ calculate_address_from_global_toc(tmp, last_java_pc); // will be relocated
 1937   __ set_last_Java_frame(R1_SP, tmp);
 1938   __ call_VM_leaf(Continuation::freeze_entry(), R16_thread, R1_SP);
 1939   __ reset_last_Java_frame();
 1940
 1941   Label L_pinned;
 1942
 1943   __ cmpwi(CCR0, R3_RET, 0);
 1944   __ bne(CCR0, L_pinned);
 1945
 1946   // yield succeeded
 1947
 1948   // Pop frames of continuation including this stub's frame
 1949   __ ld_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
 1950   // The frame pushed by gen_continuation_enter is on top now again
 1951   continuation_enter_cleanup(masm);
 1952
 1953   // Pop frame and return
 1954   Label L_return;
 1955   __ bind(L_return);
 1956   __ pop_frame();
 1957   __ ld(R0, _abi0(lr), R1_SP); // Return pc
 1958   __ mtlr(R0);
 1959   __ blr();
 1960
 1961   // yield failed - continuation is pinned
 1962
 1963   __ bind(L_pinned);
 1964
 1965   // handle pending exception thrown by freeze
 1966   __ ld(tmp, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
 1967   __ cmpdi(CCR0, tmp, 0);
 1968   __ beq(CCR0, L_return); // return if no exception is pending
 1969   __ pop_frame();
 1970   __ ld(R0, _abi0(lr), R1_SP); // Return pc
 1971   __ mtlr(R0);
 1972   __ load_const_optimized(tmp, StubRoutines::forward_exception_entry(), R0);
 1973   __ mtctr(tmp);
 1974   __ bctr();
 1975 }
 1976
 1977 // ---------------------------------------------------------------------------
 1978 // Generate a native wrapper for a given method. The method takes arguments
 1979 // in the Java compiled code convention, marshals them to the native
 1980 // convention (handlizes oops, etc), transitions to native, makes the call,
 1981 // returns to java state (possibly blocking), unhandlizes any result and
 1982 // returns.
 1983 //
 1984 // Critical native functions are a shorthand for the use of
 1985 // GetPrimitiveArrayCritical and disallow the use of any other JNI
 1986 // functions. The wrapper is expected to unpack the arguments before
 1987 // passing them to the callee. Critical native functions leave the state _in_Java,
 1988 // since they cannot stop for GC.
 1989 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
 1990 // block and the check for pending exceptions, because it's impossible for them
 1991 // to be thrown.
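// Conceptually the generated wrapper behaves as sketched below (pseudocode
// only; the names are illustrative and nothing here is emitted literally):
//
//   result_t wrapper(java_args...) {
//     shuffle java_args into C argument locations, handlizing oops;
//     if (synchronized) lock(receiver or class mirror);
//     thread->state = _thread_in_native;
//     result = native_func(JNIEnv*, [class mirror,] c_args...);
//     thread->state = _thread_in_native_trans; block if safepoint pending;
//     if (synchronized) unlock(...);
//     reset JNI handle block; unhandlize oop result;
//     forward any pending exception; return result;
//   }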
1992 // 1993 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, 1994 const methodHandle& method, 1995 int compile_id, 1996 BasicType *in_sig_bt, 1997 VMRegPair *in_regs, 1998 BasicType ret_type) { 1999 if (method->is_continuation_native_intrinsic()) { 2000 int exception_offset = -1; 2001 OopMapSet* oop_maps = new OopMapSet(); 2002 int frame_complete = -1; 2003 int stack_slots = -1; 2004 int interpreted_entry_offset = -1; 2005 int vep_offset = -1; 2006 if (method->is_continuation_enter_intrinsic()) { 2007 gen_continuation_enter(masm, 2008 in_regs, 2009 exception_offset, 2010 oop_maps, 2011 frame_complete, 2012 stack_slots, 2013 interpreted_entry_offset, 2014 vep_offset); 2015 } else if (method->is_continuation_yield_intrinsic()) { 2016 gen_continuation_yield(masm, 2017 in_regs, 2018 oop_maps, 2019 frame_complete, 2020 stack_slots, 2021 vep_offset); 2022 } else { 2023 guarantee(false, "Unknown Continuation native intrinsic"); 2024 } 2025 2026 #ifdef ASSERT 2027 if (method->is_continuation_enter_intrinsic()) { 2028 assert(interpreted_entry_offset != -1, "Must be set"); 2029 assert(exception_offset != -1, "Must be set"); 2030 } else { 2031 assert(interpreted_entry_offset == -1, "Must be unset"); 2032 assert(exception_offset == -1, "Must be unset"); 2033 } 2034 assert(frame_complete != -1, "Must be set"); 2035 assert(stack_slots != -1, "Must be set"); 2036 assert(vep_offset != -1, "Must be set"); 2037 #endif 2038 2039 __ flush(); 2040 nmethod* nm = nmethod::new_native_nmethod(method, 2041 compile_id, 2042 masm->code(), 2043 vep_offset, 2044 frame_complete, 2045 stack_slots, 2046 in_ByteSize(-1), 2047 in_ByteSize(-1), 2048 oop_maps, 2049 exception_offset); 2050 if (nm == nullptr) return nm; 2051 if (method->is_continuation_enter_intrinsic()) { 2052 ContinuationEntry::set_enter_code(nm, interpreted_entry_offset); 2053 } else if (method->is_continuation_yield_intrinsic()) { 2054 _cont_doYield_stub = nm; 2055 } 2056 return nm; 2057 } 2058 2059 if (method->is_method_handle_intrinsic()) { 2060 vmIntrinsics::ID iid = method->intrinsic_id(); 2061 intptr_t start = (intptr_t)__ pc(); 2062 int vep_offset = ((intptr_t)__ pc()) - start; 2063 gen_special_dispatch(masm, 2064 method, 2065 in_sig_bt, 2066 in_regs); 2067 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period 2068 __ flush(); 2069 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually 2070 return nmethod::new_native_nmethod(method, 2071 compile_id, 2072 masm->code(), 2073 vep_offset, 2074 frame_complete, 2075 stack_slots / VMRegImpl::slots_per_word, 2076 in_ByteSize(-1), 2077 in_ByteSize(-1), 2078 (OopMapSet*)nullptr); 2079 } 2080 2081 address native_func = method->native_function(); 2082 assert(native_func != nullptr, "must have function"); 2083 2084 // First, create signature for outgoing C call 2085 // -------------------------------------------------------------------------- 2086 2087 int total_in_args = method->size_of_parameters(); 2088 // We have received a description of where all the java args are located 2089 // on entry to the wrapper. We need to convert these args to where 2090 // the jni function will expect them. To figure out where they go 2091 // we convert the java signature to a C signature by inserting 2092 // the hidden arguments as arg[0] and possibly arg[1] (static method) 2093 2094 // Calculate the total number of C arguments and create arrays for the 2095 // signature and the outgoing registers. 
2096 // On ppc64, we have two arrays for the outgoing registers, because 2097 // some floating-point arguments must be passed in registers _and_ 2098 // in stack locations. 2099 bool method_is_static = method->is_static(); 2100 int total_c_args = total_in_args + (method_is_static ? 2 : 1); 2101 2102 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 2103 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 2104 BasicType* in_elem_bt = nullptr; 2105 2106 // Create the signature for the C call: 2107 // 1) add the JNIEnv* 2108 // 2) add the class if the method is static 2109 // 3) copy the rest of the incoming signature (shifted by the number of 2110 // hidden arguments). 2111 2112 int argc = 0; 2113 out_sig_bt[argc++] = T_ADDRESS; 2114 if (method->is_static()) { 2115 out_sig_bt[argc++] = T_OBJECT; 2116 } 2117 2118 for (int i = 0; i < total_in_args ; i++ ) { 2119 out_sig_bt[argc++] = in_sig_bt[i]; 2120 } 2121 2122 2123 // Compute the wrapper's frame size. 2124 // -------------------------------------------------------------------------- 2125 2126 // Now figure out where the args must be stored and how much stack space 2127 // they require. 2128 // 2129 // Compute framesize for the wrapper. We need to handlize all oops in 2130 // incoming registers. 2131 // 2132 // Calculate the total number of stack slots we will need: 2133 // 1) abi requirements 2134 // 2) outgoing arguments 2135 // 3) space for inbound oop handle area 2136 // 4) space for handlizing a klass if static method 2137 // 5) space for a lock if synchronized method 2138 // 6) workspace for saving return values, int <-> float reg moves, etc. 2139 // 7) alignment 2140 // 2141 // Layout of the native wrapper frame: 2142 // (stack grows upwards, memory grows downwards) 2143 // 2144 // NW [ABI_REG_ARGS] <-- 1) R1_SP 2145 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset 2146 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset 2147 // klass <-- 4) R1_SP + klass_offset 2148 // lock <-- 5) R1_SP + lock_offset 2149 // [workspace] <-- 6) R1_SP + workspace_offset 2150 // [alignment] (optional) <-- 7) 2151 // caller [JIT_TOP_ABI_48] <-- r_callers_sp 2152 // 2153 // - *_slot_offset Indicates offset from SP in number of stack slots. 2154 // - *_offset Indicates offset from SP in bytes. 2155 2156 int stack_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args) + // 1+2) 2157 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention. 2158 2159 // Now the space for the inbound oop handle area. 2160 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word; 2161 2162 int oop_handle_slot_offset = stack_slots; 2163 stack_slots += total_save_slots; // 3) 2164 2165 int klass_slot_offset = 0; 2166 int klass_offset = -1; 2167 if (method_is_static) { // 4) 2168 klass_slot_offset = stack_slots; 2169 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 2170 stack_slots += VMRegImpl::slots_per_word; 2171 } 2172 2173 int lock_slot_offset = 0; 2174 int lock_offset = -1; 2175 if (method->is_synchronized()) { // 5) 2176 lock_slot_offset = stack_slots; 2177 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size; 2178 stack_slots += VMRegImpl::slots_per_word; 2179 } 2180 2181 int workspace_slot_offset = stack_slots; // 6) 2182 stack_slots += 2; 2183 2184 // Now compute actual number of stack words we need. 2185 // Rounding to make stack properly aligned. 
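  // Worked example with illustrative numbers: if stack_slots is 46 after
  // steps 1)-6), a stack slot is 4 bytes and frames are 16-byte aligned,
  // the alignment granularity is 16 / 4 = 4 slots, so align_up(46, 4) = 48
  // slots, i.e. frame_size_in_bytes = 48 * 4 = 192 bytes.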
2186 stack_slots = align_up(stack_slots, // 7) 2187 frame::alignment_in_bytes / VMRegImpl::stack_slot_size); 2188 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size; 2189 2190 2191 // Now we can start generating code. 2192 // -------------------------------------------------------------------------- 2193 2194 intptr_t start_pc = (intptr_t)__ pc(); 2195 intptr_t vep_start_pc; 2196 intptr_t frame_done_pc; 2197 intptr_t oopmap_pc; 2198 2199 Label handle_pending_exception; 2200 2201 Register r_callers_sp = R21; 2202 Register r_temp_1 = R22; 2203 Register r_temp_2 = R23; 2204 Register r_temp_3 = R24; 2205 Register r_temp_4 = R25; 2206 Register r_temp_5 = R26; 2207 Register r_temp_6 = R27; 2208 Register r_return_pc = R28; 2209 2210 Register r_carg1_jnienv = noreg; 2211 Register r_carg2_classorobject = noreg; 2212 r_carg1_jnienv = out_regs[0].first()->as_Register(); 2213 r_carg2_classorobject = out_regs[1].first()->as_Register(); 2214 2215 2216 // Generate the Unverified Entry Point (UEP). 2217 // -------------------------------------------------------------------------- 2218 assert(start_pc == (intptr_t)__ pc(), "uep must be at start"); 2219 2220 // Check ic: object class == cached class? 2221 if (!method_is_static) { 2222 __ ic_check(4 /* end_alignment */); 2223 } 2224 2225 // Generate the Verified Entry Point (VEP). 2226 // -------------------------------------------------------------------------- 2227 vep_start_pc = (intptr_t)__ pc(); 2228 2229 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) { 2230 Label L_skip_barrier; 2231 Register klass = r_temp_1; 2232 // Notify OOP recorder (don't need the relocation) 2233 AddressLiteral md = __ constant_metadata_address(method->method_holder()); 2234 __ load_const_optimized(klass, md.value(), R0); 2235 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/); 2236 2237 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0); 2238 __ mtctr(klass); 2239 __ bctr(); 2240 2241 __ bind(L_skip_barrier); 2242 } 2243 2244 __ save_LR_CR(r_temp_1); 2245 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame. 2246 __ mr(r_callers_sp, R1_SP); // Remember frame pointer. 2247 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame. 2248 2249 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2250 bs->nmethod_entry_barrier(masm, r_temp_1); 2251 2252 frame_done_pc = (intptr_t)__ pc(); 2253 2254 // Native nmethod wrappers never take possession of the oop arguments. 2255 // So the caller will gc the arguments. 2256 // The only thing we need an oopMap for is if the call is static. 2257 // 2258 // An OopMap for lock (and class if static), and one for the VM call itself. 2259 OopMapSet *oop_maps = new OopMapSet(); 2260 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2261 2262 // Move arguments from register/stack to register/stack. 2263 // -------------------------------------------------------------------------- 2264 // 2265 // We immediately shuffle the arguments so that for any vm call we have 2266 // to make from here on out (sync slow path, jvmti, etc.) we will have 2267 // captured the oops from our caller and have a valid oopMap for them. 2268 // 2269 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 2270 // (derived from JavaThread* which is in R16_thread) and, if static, 2271 // the class mirror instead of a receiver. 
This pretty much guarantees that 2272 // register layout will not match. We ignore these extra arguments during 2273 // the shuffle. The shuffle is described by the two calling convention 2274 // vectors we have in our possession. We simply walk the java vector to 2275 // get the source locations and the c vector to get the destinations. 2276 2277 // Record sp-based slot for receiver on stack for non-static methods. 2278 int receiver_offset = -1; 2279 2280 // We move the arguments backward because the floating point registers 2281 // destination will always be to a register with a greater or equal 2282 // register number or the stack. 2283 // in is the index of the incoming Java arguments 2284 // out is the index of the outgoing C arguments 2285 2286 #ifdef ASSERT 2287 bool reg_destroyed[Register::number_of_registers]; 2288 bool freg_destroyed[FloatRegister::number_of_registers]; 2289 for (int r = 0 ; r < Register::number_of_registers ; r++) { 2290 reg_destroyed[r] = false; 2291 } 2292 for (int f = 0 ; f < FloatRegister::number_of_registers ; f++) { 2293 freg_destroyed[f] = false; 2294 } 2295 #endif // ASSERT 2296 2297 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) { 2298 2299 #ifdef ASSERT 2300 if (in_regs[in].first()->is_Register()) { 2301 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!"); 2302 } else if (in_regs[in].first()->is_FloatRegister()) { 2303 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!"); 2304 } 2305 if (out_regs[out].first()->is_Register()) { 2306 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true; 2307 } else if (out_regs[out].first()->is_FloatRegister()) { 2308 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true; 2309 } 2310 #endif // ASSERT 2311 2312 switch (in_sig_bt[in]) { 2313 case T_BOOLEAN: 2314 case T_CHAR: 2315 case T_BYTE: 2316 case T_SHORT: 2317 case T_INT: 2318 // Move int and do sign extension. 2319 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2320 break; 2321 case T_LONG: 2322 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2323 break; 2324 case T_ARRAY: 2325 case T_OBJECT: 2326 object_move(masm, stack_slots, 2327 oop_map, oop_handle_slot_offset, 2328 ((in == 0) && (!method_is_static)), &receiver_offset, 2329 in_regs[in], out_regs[out], 2330 r_callers_sp, r_temp_1, r_temp_2); 2331 break; 2332 case T_VOID: 2333 break; 2334 case T_FLOAT: 2335 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2336 break; 2337 case T_DOUBLE: 2338 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2339 break; 2340 case T_ADDRESS: 2341 fatal("found type (T_ADDRESS) in java args"); 2342 break; 2343 default: 2344 ShouldNotReachHere(); 2345 break; 2346 } 2347 } 2348 2349 // Pre-load a static method's oop into ARG2. 2350 // Used both by locking code and the normal JNI call code. 2351 if (method_is_static) { 2352 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), 2353 r_carg2_classorobject); 2354 2355 // Now handlize the static class mirror in carg2. It's known not-null. 2356 __ std(r_carg2_classorobject, klass_offset, R1_SP); 2357 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2358 __ addi(r_carg2_classorobject, R1_SP, klass_offset); 2359 } 2360 2361 // Get JNIEnv* which is first argument to native. 
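  // (No load is needed here: the JNIEnv is a field embedded directly in
  // JavaThread, so the environment pointer is just R16_thread plus a fixed
  // offset. The inverse -- recovering the JavaThread* from a JNIEnv* by
  // subtracting that offset -- is how the VM gets back to the thread from
  // an environment passed into a JNI function.)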
2362 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset())); 2363 2364 // NOTE: 2365 // 2366 // We have all of the arguments setup at this point. 2367 // We MUST NOT touch any outgoing regs from this point on. 2368 // So if we must call out we must push a new frame. 2369 2370 // Get current pc for oopmap, and load it patchable relative to global toc. 2371 oopmap_pc = (intptr_t) __ pc(); 2372 __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true); 2373 2374 // We use the same pc/oopMap repeatedly when we call out. 2375 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); 2376 2377 // r_return_pc now has the pc loaded that we will use when we finally call 2378 // to native. 2379 2380 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below. 2381 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register"); 2382 2383 # if 0 2384 // DTrace method entry 2385 # endif 2386 2387 // Lock a synchronized method. 2388 // -------------------------------------------------------------------------- 2389 2390 if (method->is_synchronized()) { 2391 Register r_oop = r_temp_4; 2392 const Register r_box = r_temp_5; 2393 Label done, locked; 2394 2395 // Load the oop for the object or class. r_carg2_classorobject contains 2396 // either the handlized oop from the incoming arguments or the handlized 2397 // class mirror (if the method is static). 2398 __ ld(r_oop, 0, r_carg2_classorobject); 2399 2400 // Get the lock box slot's address. 2401 __ addi(r_box, R1_SP, lock_offset); 2402 2403 // Try fastpath for locking. 2404 if (LockingMode == LM_LIGHTWEIGHT) { 2405 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2406 __ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_temp_1, r_temp_2, r_temp_3); 2407 } else { 2408 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2409 __ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2410 } 2411 __ beq(CCR0, locked); 2412 2413 // None of the above fast optimizations worked so we have to get into the 2414 // slow case of monitor enter. Inline a special case of call_VM that 2415 // disallows any pending_exception. 2416 2417 // Save argument registers and leave room for C-compatible ABI_REG_ARGS. 2418 int frame_size = frame::native_abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes); 2419 __ mr(R11_scratch1, R1_SP); 2420 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs); 2421 2422 // Do the call. 2423 __ set_last_Java_frame(R11_scratch1, r_return_pc); 2424 assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register"); 2425 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread); 2426 __ reset_last_Java_frame(); 2427 2428 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs); 2429 2430 __ asm_assert_mem8_is_zero(thread_(pending_exception), 2431 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C"); 2432 2433 __ bind(locked); 2434 } 2435 2436 // Use that pc we placed in r_return_pc a while back as the current frame anchor. 2437 __ set_last_Java_frame(R1_SP, r_return_pc); 2438 2439 // Publish thread state 2440 // -------------------------------------------------------------------------- 2441 2442 // Transition from _thread_in_Java to _thread_in_native. 
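  // (The release barrier below -- an lwsync on PPC64 -- orders all preceding
  // stores before the state change: once the VM observes _thread_in_native
  // it may scan this thread's frames concurrently, e.g. during a safepoint,
  // so all Java-visible state must be in place by then.)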
2443 __ li(R0, _thread_in_native); 2444 __ release(); 2445 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2446 __ stw(R0, thread_(thread_state)); 2447 2448 2449 // The JNI call 2450 // -------------------------------------------------------------------------- 2451 #if defined(ABI_ELFv2) 2452 __ call_c(native_func, relocInfo::runtime_call_type); 2453 #else 2454 FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func; 2455 __ call_c(fd_native_method, relocInfo::runtime_call_type); 2456 #endif 2457 2458 2459 // Now, we are back from the native code. 2460 2461 2462 // Unpack the native result. 2463 // -------------------------------------------------------------------------- 2464 2465 // For int-types, we do any needed sign-extension required. 2466 // Care must be taken that the return values (R3_RET and F1_RET) 2467 // will survive any VM calls for blocking or unlocking. 2468 // An OOP result (handle) is done specially in the slow-path code. 2469 2470 switch (ret_type) { 2471 case T_VOID: break; // Nothing to do! 2472 case T_FLOAT: break; // Got it where we want it (unless slow-path). 2473 case T_DOUBLE: break; // Got it where we want it (unless slow-path). 2474 case T_LONG: break; // Got it where we want it (unless slow-path). 2475 case T_OBJECT: break; // Really a handle. 2476 // Cannot de-handlize until after reclaiming jvm_lock. 2477 case T_ARRAY: break; 2478 2479 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1) 2480 Label skip_modify; 2481 __ cmpwi(CCR0, R3_RET, 0); 2482 __ beq(CCR0, skip_modify); 2483 __ li(R3_RET, 1); 2484 __ bind(skip_modify); 2485 break; 2486 } 2487 case T_BYTE: { // sign extension 2488 __ extsb(R3_RET, R3_RET); 2489 break; 2490 } 2491 case T_CHAR: { // unsigned result 2492 __ andi(R3_RET, R3_RET, 0xffff); 2493 break; 2494 } 2495 case T_SHORT: { // sign extension 2496 __ extsh(R3_RET, R3_RET); 2497 break; 2498 } 2499 case T_INT: // nothing to do 2500 break; 2501 default: 2502 ShouldNotReachHere(); 2503 break; 2504 } 2505 2506 Label after_transition; 2507 2508 // Publish thread state 2509 // -------------------------------------------------------------------------- 2510 2511 // Switch thread to "native transition" state before reading the 2512 // synchronization state. This additional state is necessary because reading 2513 // and testing the synchronization state is not atomic w.r.t. GC, as this 2514 // scenario demonstrates: 2515 // - Java thread A, in _thread_in_native state, loads _not_synchronized 2516 // and is preempted. 2517 // - VM thread changes sync state to synchronizing and suspends threads 2518 // for GC. 2519 // - Thread A is resumed to finish this native method, but doesn't block 2520 // here since it didn't see any synchronization in progress, and escapes. 2521 2522 // Transition from _thread_in_native to _thread_in_native_trans. 2523 __ li(R0, _thread_in_native_trans); 2524 __ release(); 2525 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2526 __ stw(R0, thread_(thread_state)); 2527 2528 2529 // Must we block? 2530 // -------------------------------------------------------------------------- 2531 2532 // Block, if necessary, before resuming in _thread_in_Java state. 2533 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2534 { 2535 Label no_block, sync; 2536 2537 // Force this write out before the read below. 
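  // (This is the classic store-load ordering problem: the
  // _thread_in_native_trans store above must become globally visible before
  // the safepoint-state read below, which on PPC64 requires a full sync
  // (MacroAssembler::fence()). With UseSystemMemoryBarrier the VM side
  // issues a system-wide barrier when arming safepoints, so the expensive
  // local fence can be elided here.)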
2538 if (!UseSystemMemoryBarrier) { 2539 __ fence(); 2540 } 2541 2542 Register sync_state_addr = r_temp_4; 2543 Register sync_state = r_temp_5; 2544 Register suspend_flags = r_temp_6; 2545 2546 // No synchronization in progress nor yet synchronized 2547 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path). 2548 __ safepoint_poll(sync, sync_state, true /* at_return */, false /* in_nmethod */); 2549 2550 // Not suspended. 2551 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size"); 2552 __ lwz(suspend_flags, thread_(suspend_flags)); 2553 __ cmpwi(CCR1, suspend_flags, 0); 2554 __ beq(CCR1, no_block); 2555 2556 // Block. Save any potential method result value before the operation and 2557 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this 2558 // lets us share the oopMap we used when we went native rather than create 2559 // a distinct one for this pc. 2560 __ bind(sync); 2561 __ isync(); 2562 2563 address entry_point = 2564 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans); 2565 save_native_result(masm, ret_type, workspace_slot_offset); 2566 __ call_VM_leaf(entry_point, R16_thread); 2567 restore_native_result(masm, ret_type, workspace_slot_offset); 2568 2569 __ bind(no_block); 2570 2571 // Publish thread state. 2572 // -------------------------------------------------------------------------- 2573 2574 // Thread state is thread_in_native_trans. Any safepoint blocking has 2575 // already happened so we can now change state to _thread_in_Java. 2576 2577 // Transition from _thread_in_native_trans to _thread_in_Java. 2578 __ li(R0, _thread_in_Java); 2579 __ lwsync(); // Acquire safepoint and suspend state, release thread state. 2580 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2581 __ stw(R0, thread_(thread_state)); 2582 __ bind(after_transition); 2583 } 2584 2585 // Reguard any pages if necessary. 2586 // -------------------------------------------------------------------------- 2587 2588 Label no_reguard; 2589 __ lwz(r_temp_1, thread_(stack_guard_state)); 2590 __ cmpwi(CCR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled); 2591 __ bne(CCR0, no_reguard); 2592 2593 save_native_result(masm, ret_type, workspace_slot_offset); 2594 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); 2595 restore_native_result(masm, ret_type, workspace_slot_offset); 2596 2597 __ bind(no_reguard); 2598 2599 2600 // Unlock 2601 // -------------------------------------------------------------------------- 2602 2603 if (method->is_synchronized()) { 2604 const Register r_oop = r_temp_4; 2605 const Register r_box = r_temp_5; 2606 const Register r_exception = r_temp_6; 2607 Label done; 2608 2609 // Get oop and address of lock object box. 2610 if (method_is_static) { 2611 assert(klass_offset != -1, ""); 2612 __ ld(r_oop, klass_offset, R1_SP); 2613 } else { 2614 assert(receiver_offset != -1, ""); 2615 __ ld(r_oop, receiver_offset, R1_SP); 2616 } 2617 __ addi(r_box, R1_SP, lock_offset); 2618 2619 // Try fastpath for unlocking. 2620 if (LockingMode == LM_LIGHTWEIGHT) { 2621 __ compiler_fast_unlock_lightweight_object(CCR0, r_oop, r_temp_1, r_temp_2, r_temp_3); 2622 } else { 2623 __ compiler_fast_unlock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2624 } 2625 __ beq(CCR0, done); 2626 2627 // Save and restore any potential method result value around the unlocking operation. 
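  // (R3_RET and F1_RET are volatile across calls in the PPC64 ABI, so the
  // slow-path VM call below would clobber the native method's result;
  // save_native_result spills it to the workspace slots reserved in 6)
  // above, and restore_native_result reloads it afterwards.)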
 2628     save_native_result(masm, ret_type, workspace_slot_offset);
 2629
 2630     // Must save pending exception around the slow-path VM call. Since it's a
 2631     // leaf call, the pending exception (if any) can be kept in a register.
 2632     __ ld(r_exception, thread_(pending_exception));
 2633     assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
 2634     __ li(R0, 0);
 2635     __ std(R0, thread_(pending_exception));
 2636
 2637     // Slow case of monitor exit.
 2638     // Inline a special case of call_VM that disallows any pending_exception.
 2639     // Arguments are (oop obj, BasicLock* lock, JavaThread* thread).
 2640     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);
 2641
 2642     __ asm_assert_mem8_is_zero(thread_(pending_exception),
 2643                                "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C");
 2644
 2645     restore_native_result(masm, ret_type, workspace_slot_offset);
 2646
 2647     // The pending-exception check further down jumps to forward_exception if any
 2648     // pending exception is set. The forward_exception routine expects to see the
 2649     // exception in pending_exception and not in a register. Kind of clumsy, since
 2650     // everyone who branches to forward_exception must have tested pending_exception
 2651     // first and hence already has it in a register.
 2652     __ std(r_exception, thread_(pending_exception));
 2653
 2654     __ bind(done);
 2655   }
 2656
 2657 # if 0
 2658   // DTrace method exit
 2659 # endif
 2660
 2661   // Clear "last Java frame" SP and PC.
 2662   // --------------------------------------------------------------------------
 2663
 2664   __ reset_last_Java_frame();
 2665
 2666   // Unbox oop result, e.g. JNIHandles::resolve value.
 2667   // --------------------------------------------------------------------------
 2668
 2669   if (is_reference_type(ret_type)) {
 2670     __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, MacroAssembler::PRESERVATION_NONE);
 2671   }
 2672
 2673   if (CheckJNICalls) {
 2674     // clear_pending_jni_exception_check
 2675     __ load_const_optimized(R0, 0L);
 2676     __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
 2677   }
 2678
 2679   // Reset handle block.
 2680   // --------------------------------------------------------------------------
 2681   __ ld(r_temp_1, thread_(active_handles));
 2682   // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
 2683   __ li(r_temp_2, 0);
 2684   __ stw(r_temp_2, in_bytes(JNIHandleBlock::top_offset()), r_temp_1);
 2685
 2686
 2687   // Check for pending exceptions.
 2688   // --------------------------------------------------------------------------
 2689   __ ld(r_temp_2, thread_(pending_exception));
 2690   __ cmpdi(CCR0, r_temp_2, 0);
 2691   __ bne(CCR0, handle_pending_exception);
 2692
 2693   // Return
 2694   // --------------------------------------------------------------------------
 2695
 2696   __ pop_frame();
 2697   __ restore_LR_CR(R11);
 2698   __ blr();
 2699
 2700
 2701   // Handler for pending exceptions (out-of-line).
 2702   // --------------------------------------------------------------------------
 2703   // Since this is a native call, we know the proper exception handler
 2704   // is the empty function. We just pop this frame and then jump to
 2705   // forward_exception_entry.
 2706   __ bind(handle_pending_exception);
 2707
 2708   __ pop_frame();
 2709   __ restore_LR_CR(R11);
 2710   __ b64_patchable((address)StubRoutines::forward_exception_entry(),
 2711                    relocInfo::runtime_call_type);
 2712
 2713   // Done.
 2714   // --------------------------------------------------------------------------
 2715
 2716   __ flush();
 2717
 2718   nmethod *nm = nmethod::new_native_nmethod(method,
 2719                                             compile_id,
 2720                                             masm->code(),
 2721                                             vep_start_pc - start_pc,
 2722                                             frame_done_pc - start_pc,
 2723                                             stack_slots / VMRegImpl::slots_per_word,
 2724                                             (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
 2725                                             in_ByteSize(lock_offset),
 2726                                             oop_maps);
 2727
 2728   return nm;
 2729 }
 2730
 2731 // This function returns the adjustment size (in number of words) to a c2i adapter
 2732 // activation for use during deoptimization.
 2733 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
 2734   return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::frame_alignment_in_words);
 2735 }
 2736
 2737 uint SharedRuntime::in_preserve_stack_slots() {
 2738   return frame::jit_in_preserve_size / VMRegImpl::stack_slot_size;
 2739 }
 2740
 2741 uint SharedRuntime::out_preserve_stack_slots() {
 2742 #if defined(COMPILER1) || defined(COMPILER2)
 2743   return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
 2744 #else
 2745   return 0;
 2746 #endif
 2747 }
 2748
 2749 #if defined(COMPILER1) || defined(COMPILER2)
 2750 // Frame generation for deopt and uncommon trap blobs.
 2751 static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
 2752                                 /* Read */
 2753                                 Register unroll_block_reg,
 2754                                 /* Update */
 2755                                 Register frame_sizes_reg,
 2756                                 Register number_of_frames_reg,
 2757                                 Register pcs_reg,
 2758                                 /* Invalidate */
 2759                                 Register frame_size_reg,
 2760                                 Register pc_reg) {
 2761
 2762   __ ld(pc_reg, 0, pcs_reg);
 2763   __ ld(frame_size_reg, 0, frame_sizes_reg);
 2764   __ std(pc_reg, _abi0(lr), R1_SP);
 2765   __ push_frame(frame_size_reg, R0/*tmp*/);
 2766   __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP);
 2767   __ addi(number_of_frames_reg, number_of_frames_reg, -1);
 2768   __ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
 2769   __ addi(pcs_reg, pcs_reg, wordSize);
 2770 }
 2771
 2772 // Loop through the UnrollBlock info and create new frames.
 2773 static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
 2774                                  /* read */
 2775                                  Register unroll_block_reg,
 2776                                  /* invalidate */
 2777                                  Register frame_sizes_reg,
 2778                                  Register number_of_frames_reg,
 2779                                  Register pcs_reg,
 2780                                  Register frame_size_reg,
 2781                                  Register pc_reg) {
 2782   Label loop;
 2783
 2784   // _number_of_frames is of type int (deoptimization.hpp)
 2785   __ lwa(number_of_frames_reg,
 2786          in_bytes(Deoptimization::UnrollBlock::number_of_frames_offset()),
 2787          unroll_block_reg);
 2788   __ ld(pcs_reg,
 2789         in_bytes(Deoptimization::UnrollBlock::frame_pcs_offset()),
 2790         unroll_block_reg);
 2791   __ ld(frame_sizes_reg,
 2792         in_bytes(Deoptimization::UnrollBlock::frame_sizes_offset()),
 2793         unroll_block_reg);
 2794
 2795   // stack: (caller_of_deoptee, ...).
 2796
 2797   // At this point we either have an interpreter frame or a compiled
 2798   // frame on top of the stack. If it is a compiled frame, we push a new
 2799   // c2i adapter here.
 2800
 2801   // Memorize top-frame stack-pointer.
 2802   __ mr(frame_size_reg/*old_sp*/, R1_SP);
 2803
 2804   // Resize interpreter top frame OR C2I adapter.
 2805
 2806   // At this moment, the top frame (which is the caller of the deoptee) is
 2807   // an interpreter frame or a newly pushed C2I adapter or an entry frame.
 2808   // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the
 2809   // outgoing arguments.
 2810   //
 2811   // In order to push the interpreter frame for the deoptee, we need to
 2812   // resize the top frame such that we are able to place the deoptee's
 2813   // locals in the frame.
 2814   // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI
 2815   // into a valid PARENT_IJAVA_FRAME_ABI.
 2816
 2817   __ lwa(R11_scratch1,
 2818          in_bytes(Deoptimization::UnrollBlock::caller_adjustment_offset()),
 2819          unroll_block_reg);
 2820   __ neg(R11_scratch1, R11_scratch1);
 2821
 2822   // R11_scratch1 contains size of locals for frame resizing.
 2823   // R12_scratch2 contains top frame's lr.
 2824
 2825   // Resizing by the complete frame size prevents the TOC from being
 2826   // overwritten by locals. A way to save more stack space would be
 2827   // to copy the TOC to its location in the new ABI.
 2828   __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size);
 2829
 2830   // now, resize the frame
 2831   __ resize_frame(R11_scratch1, pc_reg/*tmp*/);
 2832
 2833   // In the case where we have resized a c2i frame above, the optional
 2834   // alignment below the locals has size 32 (why?).
 2835   __ std(R12_scratch2, _abi0(lr), R1_SP);
 2836
 2837   // Initialize initial_caller_sp.
 2838   __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP);
 2839
 2840 #ifdef ASSERT
 2841   // Make sure that there is at least one entry in the array.
 2842   __ cmpdi(CCR0, number_of_frames_reg, 0);
 2843   __ asm_assert_ne("array_size must be > 0");
 2844 #endif
 2845
 2846   // Now push the new interpreter frames.
 2847   //
 2848   __ bind(loop);
 2849   // Allocate a new frame, fill in the pc.
 2850   push_skeleton_frame(masm, deopt,
 2851                       unroll_block_reg,
 2852                       frame_sizes_reg,
 2853                       number_of_frames_reg,
 2854                       pcs_reg,
 2855                       frame_size_reg,
 2856                       pc_reg);
 2857   __ cmpdi(CCR0, number_of_frames_reg, 0);
 2858   __ bne(CCR0, loop);
 2859
 2860   // Get the return address pointing into the frame manager.
 2861   __ ld(R0, 0, pcs_reg);
 2862   // Store it in the top interpreter frame.
 2863   __ std(R0, _abi0(lr), R1_SP);
 2864   // Initialize frame_manager_lr of interpreter top frame.
 2865 }
 2866 #endif
 2867
 2868 void SharedRuntime::generate_deopt_blob() {
 2869   // Allocate space for the code
 2870   ResourceMark rm;
 2871   // Setup code generation tools
 2872   CodeBuffer buffer("deopt_blob", 2048, 1024);
 2873   InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
 2874   Label exec_mode_initialized;
 2875   int frame_size_in_words;
 2876   OopMap* map = nullptr;
 2877   OopMapSet *oop_maps = new OopMapSet();
 2878
 2879   // size of ABI112 plus spill slots for R3_RET and F1_RET.
 2880   const int frame_size_in_bytes = frame::native_abi_reg_args_spill_size;
 2881   const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
 2882   int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
 2883
 2884   const Register exec_mode_reg = R21_tmp1;
 2885
 2886   const address start = __ pc();
 2887
 2888 #if defined(COMPILER1) || defined(COMPILER2)
 2889   // --------------------------------------------------------------------------
 2890   // Prolog for the non-exception case!
 2891
 2892   // We have been called from the deopt handler of the deoptee.
 2893   //
 2894   // deoptee:
 2895   //   ...
 2896   //   call X
 2897   //   ...
 2898   //   deopt_handler: call_deopt_stub
 2899   //   cur. return pc --> ...
 2900   //
 2901   // So currently SR_LR points behind the call in the deopt handler.
 2902   // We adjust it such that it points to the start of the deopt handler.
2903 // The return_pc has been stored in the frame of the deoptee and 2904 // will replace the address of the deopt_handler in the call 2905 // to Deoptimization::fetch_unroll_info below. 2906 // We can't grab a free register here, because all registers may 2907 // contain live values, so let the RegisterSaver do the adjustment 2908 // of the return pc. 2909 const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size; 2910 2911 // Push the "unpack frame" 2912 // Save everything in sight. 2913 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2914 &first_frame_size_in_bytes, 2915 /*generate_oop_map=*/ true, 2916 return_pc_adjustment_no_exception, 2917 RegisterSaver::return_pc_is_lr); 2918 assert(map != nullptr, "OopMap must have been created"); 2919 2920 __ li(exec_mode_reg, Deoptimization::Unpack_deopt); 2921 // Save exec mode for unpack_frames. 2922 __ b(exec_mode_initialized); 2923 2924 // -------------------------------------------------------------------------- 2925 // Prolog for exception case 2926 2927 // An exception is pending. 2928 // We have been called with a return (interpreter) or a jump (exception blob). 2929 // 2930 // - R3_ARG1: exception oop 2931 // - R4_ARG2: exception pc 2932 2933 int exception_offset = __ pc() - start; 2934 2935 BLOCK_COMMENT("Prolog for exception case"); 2936 2937 // Store exception oop and pc in thread (location known to GC). 2938 // This is needed since the call to "fetch_unroll_info()" may safepoint. 2939 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2940 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2941 __ std(R4_ARG2, _abi0(lr), R1_SP); 2942 2943 // Vanilla deoptimization with an exception pending in exception_oop. 2944 int exception_in_tls_offset = __ pc() - start; 2945 2946 // Push the "unpack frame". 2947 // Save everything in sight. 2948 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2949 &first_frame_size_in_bytes, 2950 /*generate_oop_map=*/ false, 2951 /*return_pc_adjustment_exception=*/ 0, 2952 RegisterSaver::return_pc_is_pre_saved); 2953 2954 // Deopt during an exception. Save exec mode for unpack_frames. 2955 __ li(exec_mode_reg, Deoptimization::Unpack_exception); 2956 2957 // fall through 2958 2959 int reexecute_offset = 0; 2960 #ifdef COMPILER1 2961 __ b(exec_mode_initialized); 2962 2963 // Reexecute entry, similar to c2 uncommon trap 2964 reexecute_offset = __ pc() - start; 2965 2966 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2967 &first_frame_size_in_bytes, 2968 /*generate_oop_map=*/ false, 2969 /*return_pc_adjustment_reexecute=*/ 0, 2970 RegisterSaver::return_pc_is_pre_saved); 2971 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute); 2972 #endif 2973 2974 // -------------------------------------------------------------------------- 2975 __ BIND(exec_mode_initialized); 2976 2977 const Register unroll_block_reg = R22_tmp2; 2978 2979 // We need to set `last_Java_frame' because `fetch_unroll_info' will 2980 // call `last_Java_frame()'. The value of the pc in the frame is not 2981 // particularly important. It just needs to identify this blob. 2982 __ set_last_Java_frame(R1_SP, noreg); 2983 2984 // With EscapeAnalysis turned on, this call may safepoint! 
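  // (If escape analysis scalar-replaced objects or elided locks in the
  // deoptee, fetch_unroll_info may have to reallocate those objects and
  // relock the monitors, and the reallocation can trigger a GC. That is why
  // the registers saved above were described by an oopmap and
  // last_Java_frame has been set.)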
 2985   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg);
 2986   address calls_return_pc = __ last_calls_return_pc();
 2987   // Set an oopmap for the call site that describes all our saved registers.
 2988   oop_maps->add_gc_map(calls_return_pc - start, map);
 2989
 2990   __ reset_last_Java_frame();
 2991   // Save the return value.
 2992   __ mr(unroll_block_reg, R3_RET);
 2993
 2994   // Restore only the result registers that have been saved
 2995   // by push_frame_reg_args_and_save_live_registers(...).
 2996   RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
 2997
 2998   // reload the exec mode from the UnrollBlock (it might have changed)
 2999   __ lwz(exec_mode_reg, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
 3000   // In excp_deopt_mode, restore and clear exception oop which we
 3001   // stored in the thread during exception entry above. The exception
 3002   // oop will be the return value of this stub.
 3003   Label skip_restore_excp;
 3004   __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
 3005   __ bne(CCR0, skip_restore_excp);
 3006   __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 3007   __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
 3008   __ li(R0, 0);
 3009   __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
 3010   __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
 3011   __ BIND(skip_restore_excp);
 3012
 3013   __ pop_frame();
 3014
 3015   // stack: (deoptee, optional i2c, caller of deoptee, ...).
 3016
 3017   // pop the deoptee's frame
 3018   __ pop_frame();
 3019
 3020   // stack: (caller_of_deoptee, ...).
 3021
 3022   // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled.
 3023   // If not compiled, the loaded value is equal to the current SP (see frame::initial_deoptimization_info())
 3024   // and the frame is effectively not resized.
 3025   Register caller_sp = R23_tmp3;
 3026   __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg);
 3027   __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5);
 3028
 3029   // Loop through the `UnrollBlock' info and create interpreter frames.
 3030   push_skeleton_frames(masm, true/*deopt*/,
 3031                        unroll_block_reg,
 3032                        R23_tmp3,
 3033                        R24_tmp4,
 3034                        R25_tmp5,
 3035                        R26_tmp6,
 3036                        R27_tmp7);
 3037
 3038   // stack: (skeletal interpreter frame, ..., optional skeletal
 3039   // interpreter frame, optional c2i, caller of deoptee, ...).
 3040
 3041   // push an `unpack_frame' taking care of float / int return values.
 3042   __ push_frame(frame_size_in_bytes, R0/*tmp*/);
 3043
 3044   // stack: (unpack frame, skeletal interpreter frame, ..., optional
 3045   // skeletal interpreter frame, optional c2i, caller of deoptee,
 3046   // ...).
 3047
 3048   // Spill live volatile registers since we'll do a call.
 3049   __ std( R3_RET, _native_abi_reg_args_spill(spill_ret),  R1_SP);
 3050   __ stfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP);
 3051
 3052   // Let the unpacker layout information in the skeletal frames just
 3053   // allocated.
 3054   __ calculate_address_from_global_toc(R3_RET, calls_return_pc, true, true, true, true);
 3055   __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET);
 3056   // This is a call to a LEAF method, so no oop map is required.
 3057   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
 3058                   R16_thread/*thread*/, exec_mode_reg/*exec_mode*/);
 3059   __ reset_last_Java_frame();
 3060
 3061   // Restore the volatiles saved above.
3062 __ ld( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP); 3063 __ lfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP); 3064 3065 // Pop the unpack frame. 3066 __ pop_frame(); 3067 __ restore_LR_CR(R0); 3068 3069 // stack: (top interpreter frame, ..., optional interpreter frame, 3070 // optional c2i, caller of deoptee, ...). 3071 3072 // Initialize R14_state. 3073 __ restore_interpreter_state(R11_scratch1); 3074 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 3075 3076 // Return to the interpreter entry point. 3077 __ blr(); 3078 __ flush(); 3079 #else // COMPILER2 3080 __ unimplemented("deopt blob needed only with compiler"); 3081 int exception_offset = __ pc() - start; 3082 #endif // COMPILER2 3083 3084 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 3085 reexecute_offset, first_frame_size_in_bytes / wordSize); 3086 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 3087 } 3088 3089 #ifdef COMPILER2 3090 void SharedRuntime::generate_uncommon_trap_blob() { 3091 // Allocate space for the code. 3092 ResourceMark rm; 3093 // Setup code generation tools. 3094 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024); 3095 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 3096 address start = __ pc(); 3097 3098 Register unroll_block_reg = R21_tmp1; 3099 Register klass_index_reg = R22_tmp2; 3100 Register unc_trap_reg = R23_tmp3; 3101 Register r_return_pc = R27_tmp7; 3102 3103 OopMapSet* oop_maps = new OopMapSet(); 3104 int frame_size_in_bytes = frame::native_abi_reg_args_size; 3105 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 3106 3107 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 3108 3109 // Push a dummy `unpack_frame' and call 3110 // `Deoptimization::uncommon_trap' to pack the compiled frame into a 3111 // vframe array and return the `UnrollBlock' information. 3112 3113 // Save LR to compiled frame. 3114 __ save_LR_CR(R11_scratch1); 3115 3116 // Push an "uncommon_trap" frame. 3117 __ push_frame_reg_args(0, R11_scratch1); 3118 3119 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...). 3120 3121 // Set the `unpack_frame' as last_Java_frame. 3122 // `Deoptimization::uncommon_trap' expects it and considers its 3123 // sender frame as the deoptee frame. 3124 // Remember the offset of the instruction whose address will be 3125 // moved to R11_scratch1. 3126 address gc_map_pc = __ pc(); 3127 __ calculate_address_from_global_toc(r_return_pc, gc_map_pc, true, true, true, true); 3128 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc); 3129 3130 __ mr(klass_index_reg, R3); 3131 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap); 3132 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), 3133 R16_thread, klass_index_reg, R5_ARG3); 3134 3135 // Set an oopmap for the call site. 3136 oop_maps->add_gc_map(gc_map_pc - start, map); 3137 3138 __ reset_last_Java_frame(); 3139 3140 // Pop the `unpack frame'. 3141 __ pop_frame(); 3142 3143 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 3144 3145 // Save the return value. 3146 __ mr(unroll_block_reg, R3_RET); 3147 3148 // Pop the uncommon_trap frame. 3149 __ pop_frame(); 3150 3151 // stack: (caller_of_deoptee, ...). 
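  // (Sanity check below: this blob is only ever entered via uncommon_trap(),
  // so the UnrollBlock handed back must carry Unpack_uncommon_trap as its
  // unpack kind; any other mode would indicate a dispatch error.)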
#ifdef ASSERT
  __ lwz(R22_tmp2, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
  __ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ asm_assert_eq("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
#endif

  // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled.
  // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info())
  // and the frame is effectively not resized.
  Register caller_sp = R23_tmp3;
  __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg);
  __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5);

  // Allocate new interpreter frame(s) and possibly a c2i adapter
  // frame.
  push_skeleton_frames(masm, false/*deopt*/,
                       unroll_block_reg,
                       R22_tmp2,
                       R23_tmp3,
                       R24_tmp4,
                       R25_tmp5,
                       R26_tmp6);

  // stack: (skeletal interpreter frame, ..., optional skeletal
  // interpreter frame, optional c2i, caller of deoptee, ...).

  // Push a dummy `unpack_frame' taking care of float return values.
  // Call `Deoptimization::unpack_frames' to lay out information in the
  // interpreter frames just created.

  // Push a simple "unpack frame" here.
  __ push_frame_reg_args(0, R11_scratch1);

  // stack: (unpack frame, skeletal interpreter frame, ..., optional
  // skeletal interpreter frame, optional c2i, caller of deoptee,
  // ...).

  // Set the "unpack_frame" as last_Java_frame.
  __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc);

  // Indicate that it is the uncommon trap case.
  __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
  // Let the unpacker lay out information in the skeletal frames just
  // allocated.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
                  R16_thread, unc_trap_reg);

  __ reset_last_Java_frame();
  // Pop the `unpack frame'.
  __ pop_frame();
  // Restore LR from the top interpreter frame.
  __ restore_LR_CR(R11_scratch1);

  // stack: (top interpreter frame, ..., optional interpreter frame,
  // optional c2i, caller of deoptee, ...).

  // Restore the interpreter state and load the template dispatch table base.
  __ restore_interpreter_state(R11_scratch1);
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);

  // Return to the interpreter entry point.
  __ blr();

  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
}
#endif // COMPILER2

// Generate a special Compile2Runtime blob that saves all registers and sets up the oopmap.
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert(StubRoutines::forward_exception_entry() != nullptr,
         "must be generated before");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code. Setup code generation tools.
  CodeBuffer buffer("handler_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start = __ pc();
  int frame_size_in_bytes = 0;

  RegisterSaver::ReturnPCLocation return_pc_location;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  if (cause_return) {
    // Nothing to do here. The frame has already been popped in MachEpilogNode.
    // Register LR already contains the return pc.
    return_pc_location = RegisterSaver::return_pc_is_pre_saved;
  } else {
    // Use thread()->saved_exception_pc() as return pc.
    return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
  }

  bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

  // Save registers, fpu state, and flags. Set R31 = return pc.
  map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
                                                                   &frame_size_in_bytes,
                                                                   /*generate_oop_map=*/ true,
                                                                   /*return_pc_adjustment=*/0,
                                                                   return_pc_location, save_vectors);

  // The following is basically a call_VM. However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.
  __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);

  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.

  // Do the call.
  __ call_VM_leaf(call_ptr, R16_thread);
  address calls_return_pc = __ last_calls_return_pc();

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.
  oop_maps->add_gc_map(calls_return_pc - start, map);

  Label noException;

  // Clear the last Java frame.
  __ reset_last_Java_frame();

  BLOCK_COMMENT(" Check pending exception.");
  const Register pending_exception = R0;
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CCR0, pending_exception, 0);
  __ beq(CCR0, noException);

  // Exception pending.
  RegisterSaver::restore_live_registers_and_pop_frame(masm,
                                                      frame_size_in_bytes,
                                                      /*restore_ctr=*/true, save_vectors);

  BLOCK_COMMENT(" Jump to forward_exception_entry.");
  // Jump to forward_exception_entry, with the issuing PC in LR
  // so it looks like the original nmethod called forward_exception_entry.
  __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);

  // No exception case.
  __ BIND(noException);

  if (!cause_return) {
    Label no_adjust;
    // If our stashed return pc was modified by the runtime, we avoid touching it.
    __ ld(R0, frame_size_in_bytes + _abi0(lr), R1_SP);
    __ cmpd(CCR0, R0, R31);
    __ bne(CCR0, no_adjust);

    // Adjust the return pc forward to step over the safepoint poll instruction.
    __ addi(R31, R31, 4);
    __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);

    __ bind(no_adjust);
  }

  // Normal exit, restore registers and exit.
  RegisterSaver::restore_live_registers_and_pop_frame(masm,
                                                      frame_size_in_bytes,
                                                      /*restore_ctr=*/true, save_vectors);

  __ blr();

  // Make sure all code is generated.
  masm->flush();

  // Fill out other meta info.
  // CodeBlob frame size is in words.
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
}
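
// For illustration only (not compiled): the shared runtime creates one
// handler blob per poll type, roughly as sketched below. Names follow the
// shared SharedRuntime::generate_stubs() code from memory and may differ
// across releases.
#if 0
address poll_handler = CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception);
_polling_page_return_handler_blob    = generate_handler_blob(poll_handler, POLL_AT_RETURN);      // cause_return case
_polling_page_safepoint_handler_blob = generate_handler_blob(poll_handler, POLL_AT_LOOP);        // poll inside a loop
_polling_page_vectors_safepoint_handler_blob = generate_handler_blob(poll_handler, POLL_AT_VECTOR_LOOP); // save_vectors case
#endif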

// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a Java call. All the argument registers are live at this point, but
// since this is generic code we don't know what they are; the caller must
// therefore do any GC of the arguments.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {

  // Allocate space for the code.
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_bytes;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = nullptr;

  address start = __ pc();

  map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
                                                                   &frame_size_in_bytes,
                                                                   /*generate_oop_map*/ true,
                                                                   /*return_pc_adjustment*/ 0,
                                                                   RegisterSaver::return_pc_is_lr);

  // Use noreg as last_Java_pc, the return pc will be reconstructed
  // from the physical frame.
  __ set_last_Java_frame(/*sp*/R1_SP, noreg);

  int frame_complete = __ offset();

  // Pass R19_method as 2nd (optional) argument, used by
  // counter_overflow_stub.
  __ call_VM_leaf(destination, R16_thread, R19_method);
  address calls_return_pc = __ last_calls_return_pc();
  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.
  // Create the oopmap for the call's return pc.
  oop_maps->add_gc_map(calls_return_pc - start, map);

  // R3_RET contains the address we are going to jump to, assuming no exception got installed.

  // Clear last_Java_sp.
  __ reset_last_Java_frame();

  // Check for pending exceptions.
  BLOCK_COMMENT("Check for pending exceptions.");
  Label pending;
  __ ld(R11_scratch1, thread_(pending_exception));
  __ cmpdi(CCR0, R11_scratch1, 0);
  __ bne(CCR0, pending);

  __ mtctr(R3_RET); // CTR will not be touched by restore_live_registers_and_pop_frame.

  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);

  // Get the returned method.
  __ get_vm_result_2(R19_method);

  __ bctr();

  // Pending exception after the safepoint.
  __ BIND(pending);

  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);

  // Exception pending => remove activation and forward to the exception handler.

  __ li(R11_scratch1, 0);
  __ ld(R3_ARG1, thread_(pending_exception));
  __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);

  // -------------
  // Make sure all code is generated.
  masm->flush();

  // Return the blob.
  // The CodeBlob frame size is in words, hence the division by wordSize.
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
                                       oop_maps, true);
}


//------------------------------Montgomery multiplication------------------------
//

// Subtract 0:b from carry:a, i.e. subtract the (len+1)-word value formed by
// the len words in b (zero-extended by one high word) from the one formed by
// 'carry' on top of the len words in a. The low len words of the difference
// are written back to a; the new high word (the outgoing carry) is returned.
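// For illustration only (not compiled; hypothetical name): a portable C
// sketch of the same computation using plain borrow propagation. The tuned
// PowerPC inline-assembly version follows.
#if 0
static unsigned long sub_reference(unsigned long a[], unsigned long b[],
                                   unsigned long carry, long len) {
  unsigned long borrow = 0;
  for (long i = 0; i < len; i++) {
    unsigned long ai = a[i];
    a[i] = ai - b[i] - borrow;
    // A borrow propagates if a[i] < b[i], or if they are equal and a
    // borrow was already pending.
    borrow = (ai < b[i] || (ai == b[i] && borrow != 0)) ? 1 : 0;
  }
  return carry - borrow; // New high word of the difference.
}
#endif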
static unsigned long
sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
  long i = 0;
  unsigned long tmp, tmp2;
  __asm__ __volatile__ (
    "subfc  %[tmp], %[tmp], %[tmp]   \n" // pre-set CA
    "mtctr  %[len]                   \n"
    "0:                              \n"
    "ldx    %[tmp], %[i], %[a]       \n"
    "ldx    %[tmp2], %[i], %[b]      \n"
    "subfe  %[tmp], %[tmp2], %[tmp]  \n" // subtract extended
    "stdx   %[tmp], %[i], %[a]       \n"
    "addi   %[i], %[i], 8            \n"
    "bdnz   0b                       \n"
    "addme  %[tmp], %[carry]         \n" // carry + CA - 1
    : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2)
    : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len)
    : "ctr", "xer", "memory"
  );
  return tmp;
}

// Multiply (unsigned) long A by long B, accumulating the double-
// length result into the accumulator formed of T0, T1, and T2.
inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned long hi, lo;
  __asm__ __volatile__ (
    "mulld  %[lo], %[A], %[B]    \n"
    "mulhdu %[hi], %[A], %[B]    \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
    : [A]"r"(A), [B]"r"(B)
    : "xer"
  );
}

// As above, but add twice the double-length result into the
// accumulator.
inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned long hi, lo;
  __asm__ __volatile__ (
    "mulld  %[lo], %[A], %[B]    \n"
    "mulhdu %[hi], %[A], %[B]    \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    "addc   %[T0], %[T0], %[lo]  \n"
    "adde   %[T1], %[T1], %[hi]  \n"
    "addze  %[T2], %[T2]         \n"
    : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
    : [A]"r"(A), [B]"r"(B)
    : "xer"
  );
}

// Fast Montgomery multiplication. The derivation of the algorithm is
// in "A Cryptographic Library for the Motorola DSP56000" (Dusse and
// Kaliski, Proc. EUROCRYPT '90, pp. 230-237).
static void
montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
                    unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");

  for (i = 0; i < len; i++) {
    int j;
    for (j = 0; j < i; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    MACC(a[i], b[0], t0, t1, t2);
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery multiply");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int j;
    for (j = i-len+1; j < len; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}

// Fast Montgomery squaring. This uses asymptotically 25% fewer
// multiplies than Montgomery multiplication, so it should be up to
// 25% faster. However, its loop control is more complex and it
// may actually run slower on some machines.
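// For illustration only (not compiled; hypothetical names): portable C
// sketches of the MACC/MACC2 primitives above, assuming a compiler with
// unsigned __int128. MACC2 is what lets the squaring routine below count
// each symmetric cross product a[j]*a[i-j] (j != i-j) only once, which is
// where the ~25% saving in multiplies comes from.
#if 0
inline void MACC_reference(unsigned long A, unsigned long B,
                           unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned __int128 prod = (unsigned __int128)A * B;
  // Add the low product word into T0, then the high product word plus the
  // carry into T1, then propagate the final carry into T2.
  unsigned __int128 sum = (unsigned __int128)T0 + (unsigned long)prod;
  T0 = (unsigned long)sum;
  sum = (unsigned __int128)T1 + (unsigned long)(prod >> 64) + (unsigned long)(sum >> 64);
  T1 = (unsigned long)sum;
  T2 += (unsigned long)(sum >> 64);
}

inline void MACC2_reference(unsigned long A, unsigned long B,
                            unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  MACC_reference(A, B, T0, T1, T2); // Accumulate the double-length product twice.
  MACC_reference(A, B, T0, T1, T2);
}
#endif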
static void
montgomery_square(unsigned long a[], unsigned long n[],
                  unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery square");

  for (i = 0; i < len; i++) {
    int j;
    int end = (i+1)/2;
    for (j = 0; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < i; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery square");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int start = i-len+1;
    int end = start + (len - start)/2;
    int j;
    for (j = start; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < len; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0) {
    t0 = sub(m, n, t0, len);
  }
}

// The threshold at which squaring is advantageous was determined
// experimentally on an i7-3930K (Sandy Bridge-E) CPU @ 3.5GHz.
// It doesn't seem to matter much on Power8, so we use the same value.
#define MONTGOMERY_SQUARING_THRESHOLD 64

// Copy len longwords from s to d, word-swapping as we go. The
// destination array is reversed.
static void reverse_words(unsigned long *s, unsigned long *d, int len) {
  d += len;
  while(len-- > 0) {
    d--;
    unsigned long s_val = *s;
    // Swap words in a longword on little endian machines.
#ifdef VM_LITTLE_ENDIAN
    s_val = (s_val << 32) | (s_val >> 32);
#endif
    *d = s_val;
    s++;
  }
}

void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                        jint len, jlong inv,
                                        jint *m_ints) {
  len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
  assert(len % 2 == 0, "array length in montgomery_multiply must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and
  // will use here a total of 8k bytes of stack space.
  int divisor = sizeof(unsigned long) * 4;
  guarantee(longwords <= 8192 / divisor, "must be");
  int total_allocation = longwords * sizeof (unsigned long) * 4;
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *b = scratch + 1 * longwords,
    *n = scratch + 2 * longwords,
    *m = scratch + 3 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)b_ints, b, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);

  reverse_words(m, (unsigned long *)m_ints, longwords);
}

void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
                                      jint len, jlong inv,
                                      jint *m_ints) {
  len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
  assert(len % 2 == 0, "array length in montgomery_square must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow. 512 jints corresponds to a 16384-bit integer and
  // will use here a total of 6k bytes of stack space.
  int divisor = sizeof(unsigned long) * 3;
  guarantee(longwords <= (8192 / divisor), "must be");
  int total_allocation = longwords * sizeof (unsigned long) * 3;
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *n = scratch + 1 * longwords,
    *m = scratch + 2 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
    ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
  } else {
    ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
  }

  reverse_words(m, (unsigned long *)m_ints, longwords);
}
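
// For illustration only (not compiled; hypothetical name): how a caller can
// obtain the 'inv' parameter asserted above. For an odd modulus word n0,
// five Newton iterations compute n0^-1 mod 2^64 (the number of correct low
// bits at least doubles per step: 3 -> 6 -> 12 -> 24 -> 48 -> 96), and
// negating the result yields inv with inv * n0 == -1UL, as required by
// montgomery_multiply and montgomery_square.
#if 0
static unsigned long compute_montgomery_inv(unsigned long n0) {
  assert((n0 & 1) != 0, "modulus must be odd");
  unsigned long x = n0;          // x == n0^-1 mod 2^3 for odd n0
  for (int i = 0; i < 5; i++) {
    x *= 2 - n0 * x;             // Newton step: doubles the number of correct bits.
  }
  return 0UL - x;                // inv = -n0^-1 mod 2^64
}
#endif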