1 /* 2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2012, 2025 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "asm/macroAssembler.inline.hpp" 27 #include "code/debugInfoRec.hpp" 28 #include "code/compiledIC.hpp" 29 #include "code/vtableStubs.hpp" 30 #include "frame_ppc.hpp" 31 #include "compiler/oopMap.hpp" 32 #include "gc/shared/gcLocker.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "interpreter/interp_masm.hpp" 35 #include "memory/resourceArea.hpp" 36 #include "oops/klass.inline.hpp" 37 #include "prims/methodHandles.hpp" 38 #include "runtime/continuation.hpp" 39 #include "runtime/continuationEntry.inline.hpp" 40 #include "runtime/jniHandles.hpp" 41 #include "runtime/os.inline.hpp" 42 #include "runtime/safepointMechanism.hpp" 43 #include "runtime/sharedRuntime.hpp" 44 #include "runtime/signature.hpp" 45 #include "runtime/stubRoutines.hpp" 46 #include "runtime/timerTrace.hpp" 47 #include "runtime/vframeArray.hpp" 48 #include "utilities/align.hpp" 49 #include "utilities/macros.hpp" 50 #include "vmreg_ppc.inline.hpp" 51 #ifdef COMPILER1 52 #include "c1/c1_Runtime1.hpp" 53 #endif 54 #ifdef COMPILER2 55 #include "opto/ad.hpp" 56 #include "opto/runtime.hpp" 57 #endif 58 59 #include <alloca.h> 60 61 #define __ masm-> 62 63 #ifdef PRODUCT 64 #define BLOCK_COMMENT(str) // nothing 65 #else 66 #define BLOCK_COMMENT(str) __ block_comment(str) 67 #endif 68 69 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 70 71 72 class RegisterSaver { 73 // Used for saving volatile registers. 74 public: 75 76 // Support different return pc locations. 
77 enum ReturnPCLocation { 78 return_pc_is_lr, 79 return_pc_is_pre_saved, 80 return_pc_is_thread_saved_exception_pc 81 }; 82 83 static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm, 84 int* out_frame_size_in_bytes, 85 bool generate_oop_map, 86 int return_pc_adjustment, 87 ReturnPCLocation return_pc_location, 88 bool save_vectors = false); 89 static void restore_live_registers_and_pop_frame(MacroAssembler* masm, 90 int frame_size_in_bytes, 91 bool restore_ctr, 92 bool save_vectors = false); 93 94 static void push_frame_and_save_argument_registers(MacroAssembler* masm, 95 Register r_temp, 96 int frame_size, 97 int total_args, 98 const VMRegPair *regs, const VMRegPair *regs2 = nullptr); 99 static void restore_argument_registers_and_pop_frame(MacroAssembler*masm, 100 int frame_size, 101 int total_args, 102 const VMRegPair *regs, const VMRegPair *regs2 = nullptr); 103 104 // During deoptimization only the result registers need to be restored 105 // all the other values have already been extracted. 106 static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes); 107 108 // Constants and data structures: 109 110 typedef enum { 111 int_reg, 112 float_reg, 113 special_reg, 114 vs_reg 115 } RegisterType; 116 117 typedef enum { 118 reg_size = 8, 119 half_reg_size = reg_size / 2, 120 vs_reg_size = 16 121 } RegisterConstants; 122 123 typedef struct { 124 RegisterType reg_type; 125 int reg_num; 126 VMReg vmreg; 127 } LiveRegType; 128 }; 129 130 131 #define RegisterSaver_LiveIntReg(regname) \ 132 { RegisterSaver::int_reg, regname->encoding(), regname->as_VMReg() } 133 134 #define RegisterSaver_LiveFloatReg(regname) \ 135 { RegisterSaver::float_reg, regname->encoding(), regname->as_VMReg() } 136 137 #define RegisterSaver_LiveSpecialReg(regname) \ 138 { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() } 139 140 #define RegisterSaver_LiveVSReg(regname) \ 141 { RegisterSaver::vs_reg, regname->encoding(), regname->as_VMReg() } 142 143 static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = { 144 // Live registers which get spilled to the stack. Register 145 // positions in this array correspond directly to the stack layout. 
146 147 // 148 // live special registers: 149 // 150 RegisterSaver_LiveSpecialReg(SR_CTR), 151 // 152 // live float registers: 153 // 154 RegisterSaver_LiveFloatReg( F0 ), 155 RegisterSaver_LiveFloatReg( F1 ), 156 RegisterSaver_LiveFloatReg( F2 ), 157 RegisterSaver_LiveFloatReg( F3 ), 158 RegisterSaver_LiveFloatReg( F4 ), 159 RegisterSaver_LiveFloatReg( F5 ), 160 RegisterSaver_LiveFloatReg( F6 ), 161 RegisterSaver_LiveFloatReg( F7 ), 162 RegisterSaver_LiveFloatReg( F8 ), 163 RegisterSaver_LiveFloatReg( F9 ), 164 RegisterSaver_LiveFloatReg( F10 ), 165 RegisterSaver_LiveFloatReg( F11 ), 166 RegisterSaver_LiveFloatReg( F12 ), 167 RegisterSaver_LiveFloatReg( F13 ), 168 RegisterSaver_LiveFloatReg( F14 ), 169 RegisterSaver_LiveFloatReg( F15 ), 170 RegisterSaver_LiveFloatReg( F16 ), 171 RegisterSaver_LiveFloatReg( F17 ), 172 RegisterSaver_LiveFloatReg( F18 ), 173 RegisterSaver_LiveFloatReg( F19 ), 174 RegisterSaver_LiveFloatReg( F20 ), 175 RegisterSaver_LiveFloatReg( F21 ), 176 RegisterSaver_LiveFloatReg( F22 ), 177 RegisterSaver_LiveFloatReg( F23 ), 178 RegisterSaver_LiveFloatReg( F24 ), 179 RegisterSaver_LiveFloatReg( F25 ), 180 RegisterSaver_LiveFloatReg( F26 ), 181 RegisterSaver_LiveFloatReg( F27 ), 182 RegisterSaver_LiveFloatReg( F28 ), 183 RegisterSaver_LiveFloatReg( F29 ), 184 RegisterSaver_LiveFloatReg( F30 ), 185 RegisterSaver_LiveFloatReg( F31 ), 186 // 187 // live integer registers: 188 // 189 RegisterSaver_LiveIntReg( R0 ), 190 //RegisterSaver_LiveIntReg( R1 ), // stack pointer 191 RegisterSaver_LiveIntReg( R2 ), 192 RegisterSaver_LiveIntReg( R3 ), 193 RegisterSaver_LiveIntReg( R4 ), 194 RegisterSaver_LiveIntReg( R5 ), 195 RegisterSaver_LiveIntReg( R6 ), 196 RegisterSaver_LiveIntReg( R7 ), 197 RegisterSaver_LiveIntReg( R8 ), 198 RegisterSaver_LiveIntReg( R9 ), 199 RegisterSaver_LiveIntReg( R10 ), 200 RegisterSaver_LiveIntReg( R11 ), 201 RegisterSaver_LiveIntReg( R12 ), 202 //RegisterSaver_LiveIntReg( R13 ), // system thread id 203 RegisterSaver_LiveIntReg( R14 ), 204 RegisterSaver_LiveIntReg( R15 ), 205 RegisterSaver_LiveIntReg( R16 ), 206 RegisterSaver_LiveIntReg( R17 ), 207 RegisterSaver_LiveIntReg( R18 ), 208 RegisterSaver_LiveIntReg( R19 ), 209 RegisterSaver_LiveIntReg( R20 ), 210 RegisterSaver_LiveIntReg( R21 ), 211 RegisterSaver_LiveIntReg( R22 ), 212 RegisterSaver_LiveIntReg( R23 ), 213 RegisterSaver_LiveIntReg( R24 ), 214 RegisterSaver_LiveIntReg( R25 ), 215 RegisterSaver_LiveIntReg( R26 ), 216 RegisterSaver_LiveIntReg( R27 ), 217 RegisterSaver_LiveIntReg( R28 ), 218 RegisterSaver_LiveIntReg( R29 ), 219 RegisterSaver_LiveIntReg( R30 ), 220 RegisterSaver_LiveIntReg( R31 ) // must be the last register (see save/restore functions below) 221 }; 222 223 static const RegisterSaver::LiveRegType RegisterSaver_LiveVSRegs[] = { 224 // 225 // live vector scalar registers (optional, only these ones are used by C2): 226 // 227 RegisterSaver_LiveVSReg( VSR32 ), 228 RegisterSaver_LiveVSReg( VSR33 ), 229 RegisterSaver_LiveVSReg( VSR34 ), 230 RegisterSaver_LiveVSReg( VSR35 ), 231 RegisterSaver_LiveVSReg( VSR36 ), 232 RegisterSaver_LiveVSReg( VSR37 ), 233 RegisterSaver_LiveVSReg( VSR38 ), 234 RegisterSaver_LiveVSReg( VSR39 ), 235 RegisterSaver_LiveVSReg( VSR40 ), 236 RegisterSaver_LiveVSReg( VSR41 ), 237 RegisterSaver_LiveVSReg( VSR42 ), 238 RegisterSaver_LiveVSReg( VSR43 ), 239 RegisterSaver_LiveVSReg( VSR44 ), 240 RegisterSaver_LiveVSReg( VSR45 ), 241 RegisterSaver_LiveVSReg( VSR46 ), 242 RegisterSaver_LiveVSReg( VSR47 ), 243 RegisterSaver_LiveVSReg( VSR48 ), 244 RegisterSaver_LiveVSReg( 
VSR49 ), 245 RegisterSaver_LiveVSReg( VSR50 ), 246 RegisterSaver_LiveVSReg( VSR51 ) 247 }; 248 249 250 OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm, 251 int* out_frame_size_in_bytes, 252 bool generate_oop_map, 253 int return_pc_adjustment, 254 ReturnPCLocation return_pc_location, 255 bool save_vectors) { 256 // Push an abi_reg_args-frame and store all registers which may be live. 257 // If requested, create an OopMap: Record volatile registers as 258 // callee-save values in an OopMap so their save locations will be 259 // propagated to the RegisterMap of the caller frame during 260 // StackFrameStream construction (needed for deoptimization; see 261 // compiledVFrame::create_stack_value). 262 // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment. 263 // Updated return pc is returned in R31 (if not return_pc_is_pre_saved). 264 265 // calculate frame size 266 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / 267 sizeof(RegisterSaver::LiveRegType); 268 const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) / 269 sizeof(RegisterSaver::LiveRegType)) 270 : 0; 271 const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size; 272 const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes) 273 + frame::native_abi_reg_args_size; 274 275 *out_frame_size_in_bytes = frame_size_in_bytes; 276 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 277 const int register_save_offset = frame_size_in_bytes - register_save_size; 278 279 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words. 280 OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : nullptr; 281 282 BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {"); 283 284 // push a new frame 285 __ push_frame(frame_size_in_bytes, noreg); 286 287 // Save some registers in the last (non-vector) slots of the new frame so we 288 // can use them as scratch regs or to determine the return pc. 289 __ std(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP); 290 __ std(R30, frame_size_in_bytes - 2*reg_size - vsregstosave_num * vs_reg_size, R1_SP); 291 292 // save the flags 293 // Do the save_LR by hand and adjust the return pc if requested. 294 switch (return_pc_location) { 295 case return_pc_is_lr: __ mflr(R31); break; 296 case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break; 297 case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break; 298 default: ShouldNotReachHere(); 299 } 300 if (return_pc_location != return_pc_is_pre_saved) { 301 if (return_pc_adjustment != 0) { 302 __ addi(R31, R31, return_pc_adjustment); 303 } 304 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP); 305 } 306 307 // save all registers (ints and floats) 308 int offset = register_save_offset; 309 310 for (int i = 0; i < regstosave_num; i++) { 311 int reg_num = RegisterSaver_LiveRegs[i].reg_num; 312 int reg_type = RegisterSaver_LiveRegs[i].reg_type; 313 314 switch (reg_type) { 315 case RegisterSaver::int_reg: { 316 if (reg_num < 30) { // We spilled R30-31 right at the beginning. 
317 __ std(as_Register(reg_num), offset, R1_SP); 318 } 319 break; 320 } 321 case RegisterSaver::float_reg: { 322 __ stfd(as_FloatRegister(reg_num), offset, R1_SP); 323 break; 324 } 325 case RegisterSaver::special_reg: { 326 if (reg_num == SR_CTR.encoding()) { 327 __ mfctr(R30); 328 __ std(R30, offset, R1_SP); 329 } else { 330 Unimplemented(); 331 } 332 break; 333 } 334 default: 335 ShouldNotReachHere(); 336 } 337 338 if (generate_oop_map) { 339 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), 340 RegisterSaver_LiveRegs[i].vmreg); 341 map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2), 342 RegisterSaver_LiveRegs[i].vmreg->next()); 343 } 344 offset += reg_size; 345 } 346 347 for (int i = 0; i < vsregstosave_num; i++) { 348 int reg_num = RegisterSaver_LiveVSRegs[i].reg_num; 349 int reg_type = RegisterSaver_LiveVSRegs[i].reg_type; 350 351 __ li(R30, offset); 352 __ stxvd2x(as_VectorSRegister(reg_num), R30, R1_SP); 353 354 if (generate_oop_map) { 355 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), 356 RegisterSaver_LiveVSRegs[i].vmreg); 357 } 358 offset += vs_reg_size; 359 } 360 361 assert(offset == frame_size_in_bytes, "consistency check"); 362 363 BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers"); 364 365 // And we're done. 366 return map; 367 } 368 369 370 // Pop the current frame and restore all the registers that we 371 // saved. 372 void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm, 373 int frame_size_in_bytes, 374 bool restore_ctr, 375 bool save_vectors) { 376 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / 377 sizeof(RegisterSaver::LiveRegType); 378 const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) / 379 sizeof(RegisterSaver::LiveRegType)) 380 : 0; 381 const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size; 382 383 const int register_save_offset = frame_size_in_bytes - register_save_size; 384 385 BLOCK_COMMENT("restore_live_registers_and_pop_frame {"); 386 387 // restore all registers (ints and floats) 388 int offset = register_save_offset; 389 390 for (int i = 0; i < regstosave_num; i++) { 391 int reg_num = RegisterSaver_LiveRegs[i].reg_num; 392 int reg_type = RegisterSaver_LiveRegs[i].reg_type; 393 394 switch (reg_type) { 395 case RegisterSaver::int_reg: { 396 if (reg_num != 31) // R31 restored at the end, it's the tmp reg! 397 __ ld(as_Register(reg_num), offset, R1_SP); 398 break; 399 } 400 case RegisterSaver::float_reg: { 401 __ lfd(as_FloatRegister(reg_num), offset, R1_SP); 402 break; 403 } 404 case RegisterSaver::special_reg: { 405 if (reg_num == SR_CTR.encoding()) { 406 if (restore_ctr) { // Nothing to do here if ctr already contains the next address. 
407 __ ld(R31, offset, R1_SP); 408 __ mtctr(R31); 409 } 410 } else { 411 Unimplemented(); 412 } 413 break; 414 } 415 default: 416 ShouldNotReachHere(); 417 } 418 offset += reg_size; 419 } 420 421 for (int i = 0; i < vsregstosave_num; i++) { 422 int reg_num = RegisterSaver_LiveVSRegs[i].reg_num; 423 int reg_type = RegisterSaver_LiveVSRegs[i].reg_type; 424 425 __ li(R31, offset); 426 __ lxvd2x(as_VectorSRegister(reg_num), R31, R1_SP); 427 428 offset += vs_reg_size; 429 } 430 431 assert(offset == frame_size_in_bytes, "consistency check"); 432 433 // restore link and the flags 434 __ ld(R31, frame_size_in_bytes + _abi0(lr), R1_SP); 435 __ mtlr(R31); 436 437 // restore scratch register's value 438 __ ld(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP); 439 440 // pop the frame 441 __ addi(R1_SP, R1_SP, frame_size_in_bytes); 442 443 BLOCK_COMMENT("} restore_live_registers_and_pop_frame"); 444 } 445 446 void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp, 447 int frame_size,int total_args, const VMRegPair *regs, 448 const VMRegPair *regs2) { 449 __ push_frame(frame_size, r_temp); 450 int st_off = frame_size - wordSize; 451 for (int i = 0; i < total_args; i++) { 452 VMReg r_1 = regs[i].first(); 453 VMReg r_2 = regs[i].second(); 454 if (!r_1->is_valid()) { 455 assert(!r_2->is_valid(), ""); 456 continue; 457 } 458 if (r_1->is_Register()) { 459 Register r = r_1->as_Register(); 460 __ std(r, st_off, R1_SP); 461 st_off -= wordSize; 462 } else if (r_1->is_FloatRegister()) { 463 FloatRegister f = r_1->as_FloatRegister(); 464 __ stfd(f, st_off, R1_SP); 465 st_off -= wordSize; 466 } 467 } 468 if (regs2 != nullptr) { 469 for (int i = 0; i < total_args; i++) { 470 VMReg r_1 = regs2[i].first(); 471 VMReg r_2 = regs2[i].second(); 472 if (!r_1->is_valid()) { 473 assert(!r_2->is_valid(), ""); 474 continue; 475 } 476 if (r_1->is_Register()) { 477 Register r = r_1->as_Register(); 478 __ std(r, st_off, R1_SP); 479 st_off -= wordSize; 480 } else if (r_1->is_FloatRegister()) { 481 FloatRegister f = r_1->as_FloatRegister(); 482 __ stfd(f, st_off, R1_SP); 483 st_off -= wordSize; 484 } 485 } 486 } 487 } 488 489 void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler*masm, int frame_size, 490 int total_args, const VMRegPair *regs, 491 const VMRegPair *regs2) { 492 int st_off = frame_size - wordSize; 493 for (int i = 0; i < total_args; i++) { 494 VMReg r_1 = regs[i].first(); 495 VMReg r_2 = regs[i].second(); 496 if (r_1->is_Register()) { 497 Register r = r_1->as_Register(); 498 __ ld(r, st_off, R1_SP); 499 st_off -= wordSize; 500 } else if (r_1->is_FloatRegister()) { 501 FloatRegister f = r_1->as_FloatRegister(); 502 __ lfd(f, st_off, R1_SP); 503 st_off -= wordSize; 504 } 505 } 506 if (regs2 != nullptr) 507 for (int i = 0; i < total_args; i++) { 508 VMReg r_1 = regs2[i].first(); 509 VMReg r_2 = regs2[i].second(); 510 if (r_1->is_Register()) { 511 Register r = r_1->as_Register(); 512 __ ld(r, st_off, R1_SP); 513 st_off -= wordSize; 514 } else if (r_1->is_FloatRegister()) { 515 FloatRegister f = r_1->as_FloatRegister(); 516 __ lfd(f, st_off, R1_SP); 517 st_off -= wordSize; 518 } 519 } 520 __ pop_frame(); 521 } 522 523 // Restore the registers that might be holding a result. 
524 void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) { 525 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / 526 sizeof(RegisterSaver::LiveRegType); 527 const int register_save_size = regstosave_num * reg_size; // VS registers not relevant here. 528 const int register_save_offset = frame_size_in_bytes - register_save_size; 529 530 // restore all result registers (ints and floats) 531 int offset = register_save_offset; 532 for (int i = 0; i < regstosave_num; i++) { 533 int reg_num = RegisterSaver_LiveRegs[i].reg_num; 534 int reg_type = RegisterSaver_LiveRegs[i].reg_type; 535 switch (reg_type) { 536 case RegisterSaver::int_reg: { 537 if (as_Register(reg_num)==R3_RET) // int result_reg 538 __ ld(as_Register(reg_num), offset, R1_SP); 539 break; 540 } 541 case RegisterSaver::float_reg: { 542 if (as_FloatRegister(reg_num)==F1_RET) // float result_reg 543 __ lfd(as_FloatRegister(reg_num), offset, R1_SP); 544 break; 545 } 546 case RegisterSaver::special_reg: { 547 // Special registers don't hold a result. 548 break; 549 } 550 default: 551 ShouldNotReachHere(); 552 } 553 offset += reg_size; 554 } 555 556 assert(offset == frame_size_in_bytes, "consistency check"); 557 } 558 559 // Is vector's size (in bytes) bigger than a size saved by default? 560 bool SharedRuntime::is_wide_vector(int size) { 561 // Note, MaxVectorSize == 8/16 on PPC64. 562 assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size); 563 return size > 8; 564 } 565 566 static int reg2slot(VMReg r) { 567 return r->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 568 } 569 570 static int reg2offset(VMReg r) { 571 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 572 } 573 574 // --------------------------------------------------------------------------- 575 // Read the array of BasicTypes from a signature, and compute where the 576 // arguments should go. Values in the VMRegPair regs array refer to 4-byte 577 // quantities. Values less than VMRegImpl::stack0 are registers, those above 578 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer 579 // as framesizes are fixed. 580 // VMRegImpl::stack0 refers to the first slot 0(sp). 581 // and VMRegImpl::stack0+1 refers to the memory word 4-bytes higher. Register 582 // up to Register::number_of_registers) are the 64-bit 583 // integer registers. 584 585 // Note: the INPUTS in sig_bt are in units of Java argument words, which are 586 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit 587 // units regardless of build. Of course for i486 there is no 64 bit build 588 589 // The Java calling convention is a "shifted" version of the C ABI. 590 // By skipping the first C ABI register we can call non-static jni methods 591 // with small numbers of arguments without having to shuffle the arguments 592 // at all. Since we control the java ABI we ought to at least get some 593 // advantage out of it. 
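// Example: for a Java signature (int, long, double, Object) the convention
// below assigns R3 (int), R4 (long), F1 (double) and R5 (Object); stack slots
// are only used once the 8 int/ptr or 13 float/double argument registers are
// exhausted.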
594 595 const VMReg java_iarg_reg[8] = { 596 R3->as_VMReg(), 597 R4->as_VMReg(), 598 R5->as_VMReg(), 599 R6->as_VMReg(), 600 R7->as_VMReg(), 601 R8->as_VMReg(), 602 R9->as_VMReg(), 603 R10->as_VMReg() 604 }; 605 606 const VMReg java_farg_reg[13] = { 607 F1->as_VMReg(), 608 F2->as_VMReg(), 609 F3->as_VMReg(), 610 F4->as_VMReg(), 611 F5->as_VMReg(), 612 F6->as_VMReg(), 613 F7->as_VMReg(), 614 F8->as_VMReg(), 615 F9->as_VMReg(), 616 F10->as_VMReg(), 617 F11->as_VMReg(), 618 F12->as_VMReg(), 619 F13->as_VMReg() 620 }; 621 622 const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]); 623 const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]); 624 625 STATIC_ASSERT(num_java_iarg_registers == Argument::n_int_register_parameters_j); 626 STATIC_ASSERT(num_java_farg_registers == Argument::n_float_register_parameters_j); 627 628 int SharedRuntime::java_calling_convention(const BasicType *sig_bt, 629 VMRegPair *regs, 630 int total_args_passed) { 631 // C2c calling conventions for compiled-compiled calls. 632 // Put 8 ints/longs into registers _AND_ 13 float/doubles into 633 // registers _AND_ put the rest on the stack. 634 635 const int inc_stk_for_intfloat = 1; // 1 slots for ints and floats 636 const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles 637 638 int i; 639 VMReg reg; 640 int stk = 0; 641 int ireg = 0; 642 int freg = 0; 643 644 // We put the first 8 arguments into registers and the rest on the 645 // stack, float arguments are already in their argument registers 646 // due to c2c calling conventions (see calling_convention). 647 for (int i = 0; i < total_args_passed; ++i) { 648 switch(sig_bt[i]) { 649 case T_BOOLEAN: 650 case T_CHAR: 651 case T_BYTE: 652 case T_SHORT: 653 case T_INT: 654 if (ireg < num_java_iarg_registers) { 655 // Put int/ptr in register 656 reg = java_iarg_reg[ireg]; 657 ++ireg; 658 } else { 659 // Put int/ptr on stack. 660 reg = VMRegImpl::stack2reg(stk); 661 stk += inc_stk_for_intfloat; 662 } 663 regs[i].set1(reg); 664 break; 665 case T_LONG: 666 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); 667 if (ireg < num_java_iarg_registers) { 668 // Put long in register. 669 reg = java_iarg_reg[ireg]; 670 ++ireg; 671 } else { 672 // Put long on stack. They must be aligned to 2 slots. 673 if (stk & 0x1) ++stk; 674 reg = VMRegImpl::stack2reg(stk); 675 stk += inc_stk_for_longdouble; 676 } 677 regs[i].set2(reg); 678 break; 679 case T_OBJECT: 680 case T_ARRAY: 681 case T_ADDRESS: 682 if (ireg < num_java_iarg_registers) { 683 // Put ptr in register. 684 reg = java_iarg_reg[ireg]; 685 ++ireg; 686 } else { 687 // Put ptr on stack. Objects must be aligned to 2 slots too, 688 // because "64-bit pointers record oop-ishness on 2 aligned 689 // adjacent registers." (see OopFlow::build_oop_map). 690 if (stk & 0x1) ++stk; 691 reg = VMRegImpl::stack2reg(stk); 692 stk += inc_stk_for_longdouble; 693 } 694 regs[i].set2(reg); 695 break; 696 case T_FLOAT: 697 if (freg < num_java_farg_registers) { 698 // Put float in register. 699 reg = java_farg_reg[freg]; 700 ++freg; 701 } else { 702 // Put float on stack. 703 reg = VMRegImpl::stack2reg(stk); 704 stk += inc_stk_for_intfloat; 705 } 706 regs[i].set1(reg); 707 break; 708 case T_DOUBLE: 709 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); 710 if (freg < num_java_farg_registers) { 711 // Put double in register. 712 reg = java_farg_reg[freg]; 713 ++freg; 714 } else { 715 // Put double on stack. 
They must be aligned to 2 slots. 716 if (stk & 0x1) ++stk; 717 reg = VMRegImpl::stack2reg(stk); 718 stk += inc_stk_for_longdouble; 719 } 720 regs[i].set2(reg); 721 break; 722 case T_VOID: 723 // Do not count halves. 724 regs[i].set_bad(); 725 break; 726 default: 727 ShouldNotReachHere(); 728 } 729 } 730 return stk; 731 } 732 733 #if defined(COMPILER1) || defined(COMPILER2) 734 // Calling convention for calling C code. 735 int SharedRuntime::c_calling_convention(const BasicType *sig_bt, 736 VMRegPair *regs, 737 int total_args_passed) { 738 // Calling conventions for C runtime calls and calls to JNI native methods. 739 // 740 // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8 741 // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist 742 // the first 13 flt/dbl's in the first 13 fp regs but additionally 743 // copy flt/dbl to the stack if they are beyond the 8th argument. 744 745 const VMReg iarg_reg[8] = { 746 R3->as_VMReg(), 747 R4->as_VMReg(), 748 R5->as_VMReg(), 749 R6->as_VMReg(), 750 R7->as_VMReg(), 751 R8->as_VMReg(), 752 R9->as_VMReg(), 753 R10->as_VMReg() 754 }; 755 756 const VMReg farg_reg[13] = { 757 F1->as_VMReg(), 758 F2->as_VMReg(), 759 F3->as_VMReg(), 760 F4->as_VMReg(), 761 F5->as_VMReg(), 762 F6->as_VMReg(), 763 F7->as_VMReg(), 764 F8->as_VMReg(), 765 F9->as_VMReg(), 766 F10->as_VMReg(), 767 F11->as_VMReg(), 768 F12->as_VMReg(), 769 F13->as_VMReg() 770 }; 771 772 // Check calling conventions consistency. 773 assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c && 774 sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c, 775 "consistency"); 776 777 const int additional_frame_header_slots = ((frame::native_abi_minframe_size - frame::jit_out_preserve_size) 778 / VMRegImpl::stack_slot_size); 779 const int float_offset_in_slots = Argument::float_on_stack_offset_in_bytes_c / VMRegImpl::stack_slot_size; 780 781 VMReg reg; 782 int arg = 0; 783 int freg = 0; 784 bool stack_used = false; 785 786 for (int i = 0; i < total_args_passed; ++i, ++arg) { 787 // Each argument corresponds to a slot in the Parameter Save Area (if not omitted) 788 int stk = (arg * 2) + additional_frame_header_slots; 789 790 switch(sig_bt[i]) { 791 // 792 // If arguments 0-7 are integers, they are passed in integer registers. 793 // Argument i is placed in iarg_reg[i]. 794 // 795 case T_BOOLEAN: 796 case T_CHAR: 797 case T_BYTE: 798 case T_SHORT: 799 case T_INT: 800 // We must cast ints to longs and use full 64 bit stack slots 801 // here. Thus fall through, handle as long. 802 case T_LONG: 803 case T_OBJECT: 804 case T_ARRAY: 805 case T_ADDRESS: 806 case T_METADATA: 807 // Oops are already boxed if required (JNI). 808 if (arg < Argument::n_int_register_parameters_c) { 809 reg = iarg_reg[arg]; 810 } else { 811 reg = VMRegImpl::stack2reg(stk); 812 stack_used = true; 813 } 814 regs[i].set2(reg); 815 break; 816 817 // 818 // Floats are treated differently from int regs: The first 13 float arguments 819 // are passed in registers (not the float args among the first 13 args). 820 // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed 821 // in farg_reg[j] if argument i is the j-th float argument of this call. 822 // 823 case T_FLOAT: 824 if (freg < Argument::n_float_register_parameters_c) { 825 // Put float in register ... 826 reg = farg_reg[freg]; 827 ++freg; 828 } else { 829 // Put float on stack. 
        reg = VMRegImpl::stack2reg(stk + float_offset_in_slots);
        stack_used = true;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < Argument::n_float_register_parameters_c) {
        // Put double in register ...
        reg = farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack.
        reg = VMRegImpl::stack2reg(stk);
        stack_used = true;
      }
      regs[i].set2(reg);
      break;

    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // Return size of the stack frame excluding the jit_out_preserve part in single-word slots.
#if defined(ABI_ELFv2)
  assert(additional_frame_header_slots == 0, "ABIv2 shouldn't use extra slots");
  // ABIv2 allows omitting the Parameter Save Area if the callee's prototype
  // indicates that all parameters can be passed in registers.
  return stack_used ? (arg * 2) : 0;
#else
  // The Parameter Save Area needs to be at least 8 double-word slots for ABIv1.
  // We have to add extra slots because ABIv1 uses a larger header.
  return MAX2(arg, 8) * 2 + additional_frame_header_slots;
#endif
}
#endif // COMPILER1 || COMPILER2

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  Unimplemented();
  return 0;
}

static address gen_c2i_adapter(MacroAssembler *masm,
                               int total_args_passed,
                               int comp_args_on_stack,
                               const BasicType *sig_bt,
                               const VMRegPair *regs,
                               Label& call_interpreter,
                               const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           align_up(total_args_passed * wordSize, frame::alignment_in_bytes);

  // regular (verified) c2i entry point
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CR0, call_interpreter);

  // Patch caller's callsite, method_(code) was not null which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi0(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi0(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);

  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
938 __ resize_frame(-adapter_size, R12_scratch2); 939 940 int st_off = adapter_size - wordSize; 941 942 // Write the args into the outgoing interpreter space. 943 for (int i = 0; i < total_args_passed; i++) { 944 VMReg r_1 = regs[i].first(); 945 VMReg r_2 = regs[i].second(); 946 if (!r_1->is_valid()) { 947 assert(!r_2->is_valid(), ""); 948 continue; 949 } 950 if (r_1->is_stack()) { 951 Register tmp_reg = value_regs[value_regs_index]; 952 value_regs_index = (value_regs_index + 1) % num_value_regs; 953 // The calling convention produces OptoRegs that ignore the out 954 // preserve area (JIT's ABI). We must account for it here. 955 int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 956 if (!r_2->is_valid()) { 957 __ lwz(tmp_reg, ld_off, sender_SP); 958 } else { 959 __ ld(tmp_reg, ld_off, sender_SP); 960 } 961 // Pretend stack targets were loaded into tmp_reg. 962 r_1 = tmp_reg->as_VMReg(); 963 } 964 965 if (r_1->is_Register()) { 966 Register r = r_1->as_Register(); 967 if (!r_2->is_valid()) { 968 __ stw(r, st_off, R1_SP); 969 st_off-=wordSize; 970 } else { 971 // Longs are given 2 64-bit slots in the interpreter, but the 972 // data is passed in only 1 slot. 973 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { 974 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); ) 975 st_off-=wordSize; 976 } 977 __ std(r, st_off, R1_SP); 978 st_off-=wordSize; 979 } 980 } else { 981 assert(r_1->is_FloatRegister(), ""); 982 FloatRegister f = r_1->as_FloatRegister(); 983 if (!r_2->is_valid()) { 984 __ stfs(f, st_off, R1_SP); 985 st_off-=wordSize; 986 } else { 987 // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the 988 // data is passed in only 1 slot. 989 // One of these should get known junk... 990 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); ) 991 st_off-=wordSize; 992 __ stfd(f, st_off, R1_SP); 993 st_off-=wordSize; 994 } 995 } 996 } 997 998 // Jump to the interpreter just as if interpreter was doing it. 999 1000 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 1001 1002 // load TOS 1003 __ addi(R15_esp, R1_SP, st_off); 1004 1005 // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1. 1006 assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register"); 1007 __ bctr(); 1008 1009 return c2i_entrypoint; 1010 } 1011 1012 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, 1013 int total_args_passed, 1014 int comp_args_on_stack, 1015 const BasicType *sig_bt, 1016 const VMRegPair *regs) { 1017 1018 // Load method's entry-point from method. 1019 __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method); 1020 __ mtctr(R12_scratch2); 1021 1022 // We will only enter here from an interpreted frame and never from after 1023 // passing thru a c2i. Azul allowed this but we do not. If we lose the 1024 // race and use a c2i we will remain interpreted for the race loser(s). 1025 // This removes all sorts of headaches on the x86 side and also eliminates 1026 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions. 1027 1028 // Note: r13 contains the senderSP on entry. We must preserve it since 1029 // we may do a i2c -> c2i transition if we lose a race where compiled 1030 // code goes non-entrant while we get args ready. 
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

  const Register ld_ptr = R15_esp;
  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed*wordSize;

  // Cut-out for having no stack args. Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through one of the value_regs temporaries.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset-=wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
        ld_offset-=2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset-=wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset-=wordSize;
        }
      } else {
        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
1107 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { 1108 ld_offset-=wordSize; 1109 } 1110 __ ld(r, ld_offset, ld_ptr); 1111 ld_offset-=wordSize; 1112 } 1113 1114 if (r_1->is_stack()) { 1115 // Now store value where the compiler expects it 1116 int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size; 1117 1118 if (sig_bt[i] == T_INT || sig_bt[i] == T_FLOAT ||sig_bt[i] == T_BOOLEAN || 1119 sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR || sig_bt[i] == T_BYTE) { 1120 __ stw(r, st_off, R1_SP); 1121 } else { 1122 __ std(r, st_off, R1_SP); 1123 } 1124 } 1125 } 1126 } 1127 1128 __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about 1129 1130 BLOCK_COMMENT("Store method"); 1131 // Store method into thread->callee_target. 1132 // We might end up in handle_wrong_method if the callee is 1133 // deoptimized as we race thru here. If that happens we don't want 1134 // to take a safepoint because the caller frame will look 1135 // interpreted and arguments are now "compiled" so it is much better 1136 // to make this transition invisible to the stack walking 1137 // code. Unfortunately if we try and find the callee by normal means 1138 // a safepoint is possible. So we stash the desired callee in the 1139 // thread and the vm will find there should this case occur. 1140 __ std(R19_method, thread_(callee_target)); 1141 1142 // Jump to the compiled code just as if compiled code was doing it. 1143 __ bctr(); 1144 } 1145 1146 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm, 1147 int total_args_passed, 1148 int comp_args_on_stack, 1149 const BasicType *sig_bt, 1150 const VMRegPair *regs, 1151 AdapterFingerPrint* fingerprint) { 1152 address i2c_entry; 1153 address c2i_unverified_entry; 1154 address c2i_entry; 1155 1156 1157 // entry: i2c 1158 1159 __ align(CodeEntryAlignment); 1160 i2c_entry = __ pc(); 1161 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs); 1162 1163 1164 // entry: c2i unverified 1165 1166 __ align(CodeEntryAlignment); 1167 BLOCK_COMMENT("c2i unverified entry"); 1168 c2i_unverified_entry = __ pc(); 1169 1170 // inline_cache contains a CompiledICData 1171 const Register ic = R19_inline_cache_reg; 1172 const Register ic_klass = R11_scratch1; 1173 const Register receiver_klass = R12_scratch2; 1174 const Register code = R21_tmp1; 1175 const Register ientry = R23_tmp3; 1176 1177 assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry); 1178 assert(R11_scratch1 == R11, "need prologue scratch register"); 1179 1180 Label call_interpreter; 1181 1182 __ ic_check(4 /* end_alignment */); 1183 __ ld(R19_method, CompiledICData::speculated_method_offset(), ic); 1184 // Argument is valid and klass is as expected, continue. 1185 1186 __ ld(code, method_(code)); 1187 __ cmpdi(CR0, code, 0); 1188 __ ld(ientry, method_(interpreter_entry)); // preloaded 1189 __ beq_predict_taken(CR0, call_interpreter); 1190 1191 // Branch to ic_miss_stub. 
1192 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type); 1193 1194 // entry: c2i 1195 1196 c2i_entry = __ pc(); 1197 1198 // Class initialization barrier for static methods 1199 address c2i_no_clinit_check_entry = nullptr; 1200 if (VM_Version::supports_fast_class_init_checks()) { 1201 Label L_skip_barrier; 1202 1203 { // Bypass the barrier for non-static methods 1204 __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method); 1205 __ andi_(R0, R0, JVM_ACC_STATIC); 1206 __ beq(CR0, L_skip_barrier); // non-static 1207 } 1208 1209 Register klass = R11_scratch1; 1210 __ load_method_holder(klass, R19_method); 1211 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/); 1212 1213 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0); 1214 __ mtctr(klass); 1215 __ bctr(); 1216 1217 __ bind(L_skip_barrier); 1218 c2i_no_clinit_check_entry = __ pc(); 1219 } 1220 1221 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1222 bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code); 1223 1224 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry); 1225 1226 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, 1227 c2i_no_clinit_check_entry); 1228 } 1229 1230 // An oop arg. Must pass a handle not the oop itself. 1231 static void object_move(MacroAssembler* masm, 1232 int frame_size_in_slots, 1233 OopMap* oop_map, int oop_handle_offset, 1234 bool is_receiver, int* receiver_offset, 1235 VMRegPair src, VMRegPair dst, 1236 Register r_caller_sp, Register r_temp_1, Register r_temp_2) { 1237 assert(!is_receiver || (is_receiver && (*receiver_offset == -1)), 1238 "receiver has already been moved"); 1239 1240 // We must pass a handle. First figure out the location we use as a handle. 1241 1242 if (src.first()->is_stack()) { 1243 // stack to stack or reg 1244 1245 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register(); 1246 Label skip; 1247 const int oop_slot_in_callers_frame = reg2slot(src.first()); 1248 1249 guarantee(!is_receiver, "expecting receiver in register"); 1250 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots)); 1251 1252 __ addi(r_handle, r_caller_sp, reg2offset(src.first())); 1253 __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp); 1254 __ cmpdi(CR0, r_temp_2, 0); 1255 __ bne(CR0, skip); 1256 // Use a null handle if oop is null. 1257 __ li(r_handle, 0); 1258 __ bind(skip); 1259 1260 if (dst.first()->is_stack()) { 1261 // stack to stack 1262 __ std(r_handle, reg2offset(dst.first()), R1_SP); 1263 } else { 1264 // stack to reg 1265 // Nothing to do, r_handle is already the dst register. 1266 } 1267 } else { 1268 // reg to stack or reg 1269 const Register r_oop = src.first()->as_Register(); 1270 const Register r_handle = dst.first()->is_stack() ? 
r_temp_1 : dst.first()->as_Register(); 1271 const int oop_slot = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word 1272 + oop_handle_offset; // in slots 1273 const int oop_offset = oop_slot * VMRegImpl::stack_slot_size; 1274 Label skip; 1275 1276 if (is_receiver) { 1277 *receiver_offset = oop_offset; 1278 } 1279 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot)); 1280 1281 __ std( r_oop, oop_offset, R1_SP); 1282 __ addi(r_handle, R1_SP, oop_offset); 1283 1284 __ cmpdi(CR0, r_oop, 0); 1285 __ bne(CR0, skip); 1286 // Use a null handle if oop is null. 1287 __ li(r_handle, 0); 1288 __ bind(skip); 1289 1290 if (dst.first()->is_stack()) { 1291 // reg to stack 1292 __ std(r_handle, reg2offset(dst.first()), R1_SP); 1293 } else { 1294 // reg to reg 1295 // Nothing to do, r_handle is already the dst register. 1296 } 1297 } 1298 } 1299 1300 static void int_move(MacroAssembler*masm, 1301 VMRegPair src, VMRegPair dst, 1302 Register r_caller_sp, Register r_temp) { 1303 assert(src.first()->is_valid(), "incoming must be int"); 1304 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long"); 1305 1306 if (src.first()->is_stack()) { 1307 if (dst.first()->is_stack()) { 1308 // stack to stack 1309 __ lwa(r_temp, reg2offset(src.first()), r_caller_sp); 1310 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1311 } else { 1312 // stack to reg 1313 __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp); 1314 } 1315 } else if (dst.first()->is_stack()) { 1316 // reg to stack 1317 __ extsw(r_temp, src.first()->as_Register()); 1318 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1319 } else { 1320 // reg to reg 1321 __ extsw(dst.first()->as_Register(), src.first()->as_Register()); 1322 } 1323 } 1324 1325 static void long_move(MacroAssembler*masm, 1326 VMRegPair src, VMRegPair dst, 1327 Register r_caller_sp, Register r_temp) { 1328 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long"); 1329 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long"); 1330 1331 if (src.first()->is_stack()) { 1332 if (dst.first()->is_stack()) { 1333 // stack to stack 1334 __ ld( r_temp, reg2offset(src.first()), r_caller_sp); 1335 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1336 } else { 1337 // stack to reg 1338 __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp); 1339 } 1340 } else if (dst.first()->is_stack()) { 1341 // reg to stack 1342 __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP); 1343 } else { 1344 // reg to reg 1345 if (dst.first()->as_Register() != src.first()->as_Register()) 1346 __ mr(dst.first()->as_Register(), src.first()->as_Register()); 1347 } 1348 } 1349 1350 static void float_move(MacroAssembler*masm, 1351 VMRegPair src, VMRegPair dst, 1352 Register r_caller_sp, Register r_temp) { 1353 assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float"); 1354 assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float"); 1355 1356 if (src.first()->is_stack()) { 1357 if (dst.first()->is_stack()) { 1358 // stack to stack 1359 __ lwz(r_temp, reg2offset(src.first()), r_caller_sp); 1360 __ stw(r_temp, reg2offset(dst.first()), R1_SP); 1361 } else { 1362 // stack to reg 1363 __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp); 1364 } 1365 } else if (dst.first()->is_stack()) { 1366 // reg to stack 1367 __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP); 
1368 } else { 1369 // reg to reg 1370 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister()) 1371 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 1372 } 1373 } 1374 1375 static void double_move(MacroAssembler*masm, 1376 VMRegPair src, VMRegPair dst, 1377 Register r_caller_sp, Register r_temp) { 1378 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double"); 1379 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double"); 1380 1381 if (src.first()->is_stack()) { 1382 if (dst.first()->is_stack()) { 1383 // stack to stack 1384 __ ld( r_temp, reg2offset(src.first()), r_caller_sp); 1385 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1386 } else { 1387 // stack to reg 1388 __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp); 1389 } 1390 } else if (dst.first()->is_stack()) { 1391 // reg to stack 1392 __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP); 1393 } else { 1394 // reg to reg 1395 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister()) 1396 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 1397 } 1398 } 1399 1400 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { 1401 switch (ret_type) { 1402 case T_BOOLEAN: 1403 case T_CHAR: 1404 case T_BYTE: 1405 case T_SHORT: 1406 case T_INT: 1407 __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1408 break; 1409 case T_ARRAY: 1410 case T_OBJECT: 1411 case T_LONG: 1412 __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1413 break; 1414 case T_FLOAT: 1415 __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1416 break; 1417 case T_DOUBLE: 1418 __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1419 break; 1420 case T_VOID: 1421 break; 1422 default: 1423 ShouldNotReachHere(); 1424 break; 1425 } 1426 } 1427 1428 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { 1429 switch (ret_type) { 1430 case T_BOOLEAN: 1431 case T_CHAR: 1432 case T_BYTE: 1433 case T_SHORT: 1434 case T_INT: 1435 __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1436 break; 1437 case T_ARRAY: 1438 case T_OBJECT: 1439 case T_LONG: 1440 __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1441 break; 1442 case T_FLOAT: 1443 __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1444 break; 1445 case T_DOUBLE: 1446 __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1447 break; 1448 case T_VOID: 1449 break; 1450 default: 1451 ShouldNotReachHere(); 1452 break; 1453 } 1454 } 1455 1456 static void verify_oop_args(MacroAssembler* masm, 1457 const methodHandle& method, 1458 const BasicType* sig_bt, 1459 const VMRegPair* regs) { 1460 Register temp_reg = R19_method; // not part of any compiled calling seq 1461 if (VerifyOops) { 1462 for (int i = 0; i < method->size_of_parameters(); i++) { 1463 if (is_reference_type(sig_bt[i])) { 1464 VMReg r = regs[i].first(); 1465 assert(r->is_valid(), "bad oop arg"); 1466 if (r->is_stack()) { 1467 __ ld(temp_reg, reg2offset(r), R1_SP); 1468 __ verify_oop(temp_reg, FILE_AND_LINE); 1469 } else { 1470 __ verify_oop(r->as_Register(), FILE_AND_LINE); 1471 } 1472 } 1473 } 1474 } 1475 } 1476 1477 static void gen_special_dispatch(MacroAssembler* masm, 1478 const methodHandle& method, 1479 const BasicType* sig_bt, 1480 const VMRegPair* regs) { 1481 verify_oop_args(masm, 
method, sig_bt, regs); 1482 vmIntrinsics::ID iid = method->intrinsic_id(); 1483 1484 // Now write the args into the outgoing interpreter space 1485 bool has_receiver = false; 1486 Register receiver_reg = noreg; 1487 int member_arg_pos = -1; 1488 Register member_reg = noreg; 1489 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); 1490 if (ref_kind != 0) { 1491 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument 1492 member_reg = R19_method; // known to be free at this point 1493 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); 1494 } else if (iid == vmIntrinsics::_invokeBasic) { 1495 has_receiver = true; 1496 } else if (iid == vmIntrinsics::_linkToNative) { 1497 member_arg_pos = method->size_of_parameters() - 1; // trailing NativeEntryPoint argument 1498 member_reg = R19_method; // known to be free at this point 1499 } else { 1500 fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid)); 1501 } 1502 1503 if (member_reg != noreg) { 1504 // Load the member_arg into register, if necessary. 1505 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs); 1506 VMReg r = regs[member_arg_pos].first(); 1507 if (r->is_stack()) { 1508 __ ld(member_reg, reg2offset(r), R1_SP); 1509 } else { 1510 // no data motion is needed 1511 member_reg = r->as_Register(); 1512 } 1513 } 1514 1515 if (has_receiver) { 1516 // Make sure the receiver is loaded into a register. 1517 assert(method->size_of_parameters() > 0, "oob"); 1518 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); 1519 VMReg r = regs[0].first(); 1520 assert(r->is_valid(), "bad receiver arg"); 1521 if (r->is_stack()) { 1522 // Porting note: This assumes that compiled calling conventions always 1523 // pass the receiver oop in a register. If this is not true on some 1524 // platform, pick a temp and load the receiver from stack. 1525 fatal("receiver always in a register"); 1526 receiver_reg = R11_scratch1; // TODO (hs24): is R11_scratch1 really free at this point? 1527 __ ld(receiver_reg, reg2offset(r), R1_SP); 1528 } else { 1529 // no data motion is needed 1530 receiver_reg = r->as_Register(); 1531 } 1532 } 1533 1534 // Figure out which address we are really jumping to: 1535 MethodHandles::generate_method_handle_dispatch(masm, iid, 1536 receiver_reg, member_reg, /*for_compiler_entry:*/ true); 1537 } 1538 1539 //---------------------------- continuation_enter_setup --------------------------- 1540 // 1541 // Frame setup. 1542 // 1543 // Arguments: 1544 // None. 1545 // 1546 // Results: 1547 // R1_SP: pointer to blank ContinuationEntry in the pushed frame. 
1548 // 1549 // Kills: 1550 // R0, R20 1551 // 1552 static OopMap* continuation_enter_setup(MacroAssembler* masm, int& framesize_words) { 1553 assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, ""); 1554 assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, ""); 1555 assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, ""); 1556 1557 const int frame_size_in_bytes = (int)ContinuationEntry::size(); 1558 assert(is_aligned(frame_size_in_bytes, frame::alignment_in_bytes), "alignment error"); 1559 1560 framesize_words = frame_size_in_bytes / wordSize; 1561 1562 DEBUG_ONLY(__ block_comment("setup {")); 1563 // Save return pc and push entry frame 1564 const Register return_pc = R20; 1565 __ mflr(return_pc); 1566 __ std(return_pc, _abi0(lr), R1_SP); // SP->lr = return_pc 1567 __ push_frame(frame_size_in_bytes , R0); // SP -= frame_size_in_bytes 1568 1569 OopMap* map = new OopMap((int)frame_size_in_bytes / VMRegImpl::stack_slot_size, 0 /* arg_slots*/); 1570 1571 __ ld_ptr(R0, JavaThread::cont_entry_offset(), R16_thread); 1572 __ st_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread); 1573 __ st_ptr(R0, ContinuationEntry::parent_offset(), R1_SP); 1574 DEBUG_ONLY(__ block_comment("} setup")); 1575 1576 return map; 1577 } 1578 1579 //---------------------------- fill_continuation_entry --------------------------- 1580 // 1581 // Initialize the new ContinuationEntry. 1582 // 1583 // Arguments: 1584 // R1_SP: pointer to blank Continuation entry 1585 // reg_cont_obj: pointer to the continuation 1586 // reg_flags: flags 1587 // 1588 // Results: 1589 // R1_SP: pointer to filled out ContinuationEntry 1590 // 1591 // Kills: 1592 // R8_ARG6, R9_ARG7, R10_ARG8 1593 // 1594 static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, Register reg_flags) { 1595 assert_different_registers(reg_cont_obj, reg_flags); 1596 Register zero = R8_ARG6; 1597 Register tmp2 = R9_ARG7; 1598 Register tmp3 = R10_ARG8; 1599 1600 DEBUG_ONLY(__ block_comment("fill {")); 1601 #ifdef ASSERT 1602 __ load_const_optimized(tmp2, ContinuationEntry::cookie_value()); 1603 __ stw(tmp2, in_bytes(ContinuationEntry::cookie_offset()), R1_SP); 1604 #endif //ASSERT 1605 1606 __ li(zero, 0); 1607 __ st_ptr(reg_cont_obj, ContinuationEntry::cont_offset(), R1_SP); 1608 __ stw(reg_flags, in_bytes(ContinuationEntry::flags_offset()), R1_SP); 1609 __ st_ptr(zero, ContinuationEntry::chunk_offset(), R1_SP); 1610 __ stw(zero, in_bytes(ContinuationEntry::argsize_offset()), R1_SP); 1611 __ stw(zero, in_bytes(ContinuationEntry::pin_count_offset()), R1_SP); 1612 1613 __ ld_ptr(tmp2, JavaThread::cont_fastpath_offset(), R16_thread); 1614 __ ld(tmp3, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread); 1615 __ st_ptr(tmp2, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP); 1616 __ std(tmp3, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP); 1617 1618 __ st_ptr(zero, JavaThread::cont_fastpath_offset(), R16_thread); 1619 __ std(zero, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread); 1620 DEBUG_ONLY(__ block_comment("} fill")); 1621 } 1622 1623 //---------------------------- continuation_enter_cleanup --------------------------- 1624 // 1625 // Copy corresponding attributes from the top ContinuationEntry to the JavaThread 1626 // before deleting it. 1627 // 1628 // Arguments: 1629 // R1_SP: pointer to the ContinuationEntry 1630 // 1631 // Results: 1632 // None. 
1633 // 1634 // Kills: 1635 // R8_ARG6, R9_ARG7, R10_ARG8, R15_esp 1636 // 1637 static void continuation_enter_cleanup(MacroAssembler* masm) { 1638 Register tmp1 = R8_ARG6; 1639 Register tmp2 = R9_ARG7; 1640 Register tmp3 = R10_ARG8; 1641 1642 #ifdef ASSERT 1643 __ block_comment("clean {"); 1644 __ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread); 1645 __ cmpd(CR0, R1_SP, tmp1); 1646 __ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP"); 1647 #endif 1648 1649 __ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP); 1650 __ st_ptr(tmp1, JavaThread::cont_fastpath_offset(), R16_thread); 1651 1652 if (CheckJNICalls) { 1653 // Check if this is a virtual thread continuation 1654 Label L_skip_vthread_code; 1655 __ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP); 1656 __ cmpwi(CR0, R0, 0); 1657 __ beq(CR0, L_skip_vthread_code); 1658 1659 // If the held monitor count is > 0 and this vthread is terminating, then 1660 // it failed to release a JNI monitor. So we issue the same log message 1661 // that JavaThread::exit does. 1662 __ ld(R0, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread); 1663 __ cmpdi(CR0, R0, 0); 1664 __ beq(CR0, L_skip_vthread_code); 1665 1666 // Save return value potentially containing the exception oop 1667 Register ex_oop = R15_esp; // nonvolatile register 1668 __ mr(ex_oop, R3_RET); 1669 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held)); 1670 // Restore potential return value 1671 __ mr(R3_RET, ex_oop); 1672 1673 // For vthreads we have to explicitly zero the JNI monitor count of the carrier 1674 // on termination. The held count is implicitly zeroed below when we restore from 1675 // the parent held count (which has to be zero). 1676 __ li(tmp1, 0); 1677 __ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread); 1678 1679 __ bind(L_skip_vthread_code); 1680 } 1681 #ifdef ASSERT 1682 else { 1683 // Check if this is a virtual thread continuation 1684 Label L_skip_vthread_code; 1685 __ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP); 1686 __ cmpwi(CR0, R0, 0); 1687 __ beq(CR0, L_skip_vthread_code); 1688 1689 // See comment just above. If not checking JNI calls, the JNI count is only 1690 // needed for assertion checking.
1691 __ li(tmp1, 0); 1692 __ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread); 1693 1694 __ bind(L_skip_vthread_code); 1695 } 1696 #endif 1697 1698 __ ld(tmp2, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP); 1699 __ ld_ptr(tmp3, ContinuationEntry::parent_offset(), R1_SP); 1700 __ std(tmp2, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread); 1701 __ st_ptr(tmp3, JavaThread::cont_entry_offset(), R16_thread); 1702 DEBUG_ONLY(__ block_comment("} clean")); 1703 } 1704 1705 static void check_continuation_enter_argument(VMReg actual_vmreg, 1706 Register expected_reg, 1707 const char* name) { 1708 assert(!actual_vmreg->is_stack(), "%s cannot be on stack", name); 1709 assert(actual_vmreg->as_Register() == expected_reg, 1710 "%s is in unexpected register: %s instead of %s", 1711 name, actual_vmreg->as_Register()->name(), expected_reg->name()); 1712 } 1713 1714 static void gen_continuation_enter(MacroAssembler* masm, 1715 const VMRegPair* regs, 1716 int& exception_offset, 1717 OopMapSet* oop_maps, 1718 int& frame_complete, 1719 int& framesize_words, 1720 int& interpreted_entry_offset, 1721 int& compiled_entry_offset) { 1722 1723 // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread) 1724 int pos_cont_obj = 0; 1725 int pos_is_cont = 1; 1726 int pos_is_virtual = 2; 1727 1728 // The platform-specific calling convention may present the arguments in various registers. 1729 // To simplify the rest of the code, we expect the arguments to reside at these known 1730 // registers, and we additionally check the placement here in case calling convention ever 1731 // changes. 1732 Register reg_cont_obj = R3_ARG1; 1733 Register reg_is_cont = R4_ARG2; 1734 Register reg_is_virtual = R5_ARG3; 1735 1736 check_continuation_enter_argument(regs[pos_cont_obj].first(), reg_cont_obj, "Continuation object"); 1737 check_continuation_enter_argument(regs[pos_is_cont].first(), reg_is_cont, "isContinue"); 1738 check_continuation_enter_argument(regs[pos_is_virtual].first(), reg_is_virtual, "isVirtualThread"); 1739 1740 address resolve_static_call = SharedRuntime::get_resolve_static_call_stub(); 1741 1742 address start = __ pc(); 1743 1744 Label L_thaw, L_exit; 1745 1746 // i2i entry used at interp_only_mode only 1747 interpreted_entry_offset = __ pc() - start; 1748 { 1749 #ifdef ASSERT 1750 Label is_interp_only; 1751 __ lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread); 1752 __ cmpwi(CR0, R0, 0); 1753 __ bne(CR0, is_interp_only); 1754 __ stop("enterSpecial interpreter entry called when not in interp_only_mode"); 1755 __ bind(is_interp_only); 1756 #endif 1757 1758 // Read interpreter arguments into registers (this is an ad-hoc i2c adapter) 1759 __ ld(reg_cont_obj, Interpreter::stackElementSize*3, R15_esp); 1760 __ lwz(reg_is_cont, Interpreter::stackElementSize*2, R15_esp); 1761 __ lwz(reg_is_virtual, Interpreter::stackElementSize*1, R15_esp); 1762 1763 __ push_cont_fastpath(); 1764 1765 OopMap* map = continuation_enter_setup(masm, framesize_words); 1766 1767 // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe, 1768 // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway. 1769 1770 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual); 1771 1772 // If isContinue, call to thaw. 
Otherwise, call Continuation.enter(Continuation c, boolean isContinue) 1773 __ cmpwi(CR0, reg_is_cont, 0); 1774 __ bne(CR0, L_thaw); 1775 1776 // --- call Continuation.enter(Continuation c, boolean isContinue) 1777 1778 // Emit compiled static call. The call will be always resolved to the c2i 1779 // entry of Continuation.enter(Continuation c, boolean isContinue). 1780 // There are special cases in SharedRuntime::resolve_static_call_C() and 1781 // SharedRuntime::resolve_sub_helper_internal() to achieve this 1782 // See also corresponding call below. 1783 address c2i_call_pc = __ pc(); 1784 int start_offset = __ offset(); 1785 // Put the entry point as a constant into the constant pool. 1786 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none); 1787 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr); 1788 guarantee(entry_point_toc_addr != nullptr, "const section overflow"); 1789 1790 // Emit the trampoline stub which will be related to the branch-and-link below. 1791 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset); 1792 guarantee(stub != nullptr, "no space for trampoline stub"); 1793 1794 __ relocate(relocInfo::static_call_type); 1795 // Note: At this point we do not have the address of the trampoline 1796 // stub, and the entry point might be too far away for bl, so __ pc() 1797 // serves as dummy and the bl will be patched later. 1798 __ bl(__ pc()); 1799 oop_maps->add_gc_map(__ pc() - start, map); 1800 __ post_call_nop(); 1801 1802 __ b(L_exit); 1803 1804 // static stub for the call above 1805 stub = CompiledDirectCall::emit_to_interp_stub(masm, c2i_call_pc); 1806 guarantee(stub != nullptr, "no space for static stub"); 1807 } 1808 1809 // compiled entry 1810 __ align(CodeEntryAlignment); 1811 compiled_entry_offset = __ pc() - start; 1812 1813 OopMap* map = continuation_enter_setup(masm, framesize_words); 1814 1815 // Frame is now completed as far as size and linkage. 1816 frame_complete =__ pc() - start; 1817 1818 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual); 1819 1820 // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue) 1821 __ cmpwi(CR0, reg_is_cont, 0); 1822 __ bne(CR0, L_thaw); 1823 1824 // --- call Continuation.enter(Continuation c, boolean isContinue) 1825 1826 // Emit compiled static call 1827 // The call needs to be resolved. There's a special case for this in 1828 // SharedRuntime::find_callee_info_helper() which calls 1829 // LinkResolver::resolve_continuation_enter() which resolves the call to 1830 // Continuation.enter(Continuation c, boolean isContinue). 1831 address call_pc = __ pc(); 1832 int start_offset = __ offset(); 1833 // Put the entry point as a constant into the constant pool. 1834 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none); 1835 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr); 1836 guarantee(entry_point_toc_addr != nullptr, "const section overflow"); 1837 1838 // Emit the trampoline stub which will be related to the branch-and-link below. 
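// (Comment-only sketch, not emitted code: the trampoline stub generated just
// below is assumed to follow the usual PPC64 far-branch scheme, i.e. it
// reloads the 64-bit target from the TOC entry created above and branches via
// CTR, so the near 'bl' only needs to reach the stub:
//   ld R12, <entry_point_toc_offset>(R29_TOC); mtctr R12; bctr )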
1839 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset); 1840 guarantee(stub != nullptr, "no space for trampoline stub"); 1841 1842 __ relocate(relocInfo::static_call_type); 1843 // Note: At this point we do not have the address of the trampoline 1844 // stub, and the entry point might be too far away for bl, so __ pc() 1845 // serves as dummy and the bl will be patched later. 1846 __ bl(__ pc()); 1847 oop_maps->add_gc_map(__ pc() - start, map); 1848 __ post_call_nop(); 1849 1850 __ b(L_exit); 1851 1852 // --- Thawing path 1853 1854 __ bind(L_thaw); 1855 ContinuationEntry::_thaw_call_pc_offset = __ pc() - start; 1856 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(StubRoutines::cont_thaw())); 1857 __ mtctr(R0); 1858 __ bctrl(); 1859 oop_maps->add_gc_map(__ pc() - start, map->deep_copy()); 1860 ContinuationEntry::_return_pc_offset = __ pc() - start; 1861 __ post_call_nop(); 1862 1863 // --- Normal exit (resolve/thawing) 1864 1865 __ bind(L_exit); 1866 ContinuationEntry::_cleanup_offset = __ pc() - start; 1867 continuation_enter_cleanup(masm); 1868 1869 // Pop frame and return 1870 DEBUG_ONLY(__ ld_ptr(R0, 0, R1_SP)); 1871 __ addi(R1_SP, R1_SP, framesize_words*wordSize); 1872 DEBUG_ONLY(__ cmpd(CR0, R0, R1_SP)); 1873 __ asm_assert_eq(FILE_AND_LINE ": inconsistent frame size"); 1874 __ ld(R0, _abi0(lr), R1_SP); // Return pc 1875 __ mtlr(R0); 1876 __ blr(); 1877 1878 // --- Exception handling path 1879 1880 exception_offset = __ pc() - start; 1881 1882 continuation_enter_cleanup(masm); 1883 Register ex_pc = R17_tos; // nonvolatile register 1884 Register ex_oop = R15_esp; // nonvolatile register 1885 __ ld(ex_pc, _abi0(callers_sp), R1_SP); // Load caller's return pc 1886 __ ld(ex_pc, _abi0(lr), ex_pc); 1887 __ mr(ex_oop, R3_RET); // save return value containing the exception oop 1888 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, ex_pc); 1889 __ mtlr(R3_RET); // the exception handler 1890 __ ld(R1_SP, _abi0(callers_sp), R1_SP); // remove enterSpecial frame 1891 1892 // Continue at exception handler 1893 // See OptoRuntime::generate_exception_blob for register arguments 1894 __ mr(R3_ARG1, ex_oop); // pass exception oop 1895 __ mr(R4_ARG2, ex_pc); // pass exception pc 1896 __ blr(); 1897 1898 // static stub for the call above 1899 stub = CompiledDirectCall::emit_to_interp_stub(masm, call_pc); 1900 guarantee(stub != nullptr, "no space for static stub"); 1901 } 1902 1903 static void gen_continuation_yield(MacroAssembler* masm, 1904 const VMRegPair* regs, 1905 OopMapSet* oop_maps, 1906 int& frame_complete, 1907 int& framesize_words, 1908 int& compiled_entry_offset) { 1909 Register tmp = R10_ARG8; 1910 1911 const int framesize_bytes = (int)align_up((int)frame::native_abi_reg_args_size, frame::alignment_in_bytes); 1912 framesize_words = framesize_bytes / wordSize; 1913 1914 address start = __ pc(); 1915 compiled_entry_offset = __ pc() - start; 1916 1917 // Save return pc and push entry frame 1918 __ mflr(tmp); 1919 __ std(tmp, _abi0(lr), R1_SP); // SP->lr = return_pc 1920 __ push_frame(framesize_bytes , R0); // SP -= frame_size_in_bytes 1921 1922 DEBUG_ONLY(__ block_comment("Frame Complete")); 1923 frame_complete = __ pc() - start; 1924 address last_java_pc = __ pc(); 1925 1926 // This nop must be exactly at the PC we push into the frame info. 1927 // We use this nop for fast CodeBlob lookup, associate the OopMap 1928 // with it right away. 
1929 __ post_call_nop(); 1930 OopMap* map = new OopMap(framesize_bytes / VMRegImpl::stack_slot_size, 1); 1931 oop_maps->add_gc_map(last_java_pc - start, map); 1932 1933 __ calculate_address_from_global_toc(tmp, last_java_pc); // will be relocated 1934 __ set_last_Java_frame(R1_SP, tmp); 1935 __ call_VM_leaf(Continuation::freeze_entry(), R16_thread, R1_SP); 1936 __ reset_last_Java_frame(); 1937 1938 Label L_pinned; 1939 1940 __ cmpwi(CR0, R3_RET, 0); 1941 __ bne(CR0, L_pinned); 1942 1943 // yield succeeded 1944 1945 // Pop frames of continuation including this stub's frame 1946 __ ld_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread); 1947 // The frame pushed by gen_continuation_enter is on top now again 1948 continuation_enter_cleanup(masm); 1949 1950 // Pop frame and return 1951 Label L_return; 1952 __ bind(L_return); 1953 __ pop_frame(); 1954 __ ld(R0, _abi0(lr), R1_SP); // Return pc 1955 __ mtlr(R0); 1956 __ blr(); 1957 1958 // yield failed - continuation is pinned 1959 1960 __ bind(L_pinned); 1961 1962 // handle pending exception thrown by freeze 1963 __ ld(tmp, in_bytes(JavaThread::pending_exception_offset()), R16_thread); 1964 __ cmpdi(CR0, tmp, 0); 1965 __ beq(CR0, L_return); // return if no exception is pending 1966 __ pop_frame(); 1967 __ ld(R0, _abi0(lr), R1_SP); // Return pc 1968 __ mtlr(R0); 1969 __ load_const_optimized(tmp, StubRoutines::forward_exception_entry(), R0); 1970 __ mtctr(tmp); 1971 __ bctr(); 1972 } 1973 1974 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) { 1975 ::continuation_enter_cleanup(masm); 1976 } 1977 1978 // --------------------------------------------------------------------------- 1979 // Generate a native wrapper for a given method. The method takes arguments 1980 // in the Java compiled code convention, marshals them to the native 1981 // convention (handlizes oops, etc), transitions to native, makes the call, 1982 // returns to Java state (possibly blocking), unhandlizes any result and 1983 // returns. 1984 // 1985 // Critical native functions are a shorthand for the use of 1986 // GetPrimitiveArrayCritical and disallow the use of any other JNI 1987 // functions. The wrapper is expected to unpack the arguments before 1988 // passing them to the callee. Critical native functions leave the state _in_Java, 1989 // since they cannot stop for GC. 1990 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle 1991 // block and the check for pending exceptions, since it's impossible for them 1992 // to be thrown.
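// Illustrative example (comment only; the names below are made up for the
// example, not taken from this file): for a Java method declared as
//   package p; class C { static native int m(long x); }
// the wrapper ends up calling a C function with the standard JNI signature
//   jint JNICALL Java_p_C_m(JNIEnv* env, jclass clazz, jlong x);
// i.e. a JNIEnv* is prepended and, for static methods, the class mirror takes
// the receiver's place, which is exactly the shape built into out_sig_bt below.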
1993 // 1994 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, 1995 const methodHandle& method, 1996 int compile_id, 1997 BasicType *in_sig_bt, 1998 VMRegPair *in_regs, 1999 BasicType ret_type) { 2000 if (method->is_continuation_native_intrinsic()) { 2001 int exception_offset = -1; 2002 OopMapSet* oop_maps = new OopMapSet(); 2003 int frame_complete = -1; 2004 int stack_slots = -1; 2005 int interpreted_entry_offset = -1; 2006 int vep_offset = -1; 2007 if (method->is_continuation_enter_intrinsic()) { 2008 gen_continuation_enter(masm, 2009 in_regs, 2010 exception_offset, 2011 oop_maps, 2012 frame_complete, 2013 stack_slots, 2014 interpreted_entry_offset, 2015 vep_offset); 2016 } else if (method->is_continuation_yield_intrinsic()) { 2017 gen_continuation_yield(masm, 2018 in_regs, 2019 oop_maps, 2020 frame_complete, 2021 stack_slots, 2022 vep_offset); 2023 } else { 2024 guarantee(false, "Unknown Continuation native intrinsic"); 2025 } 2026 2027 #ifdef ASSERT 2028 if (method->is_continuation_enter_intrinsic()) { 2029 assert(interpreted_entry_offset != -1, "Must be set"); 2030 assert(exception_offset != -1, "Must be set"); 2031 } else { 2032 assert(interpreted_entry_offset == -1, "Must be unset"); 2033 assert(exception_offset == -1, "Must be unset"); 2034 } 2035 assert(frame_complete != -1, "Must be set"); 2036 assert(stack_slots != -1, "Must be set"); 2037 assert(vep_offset != -1, "Must be set"); 2038 #endif 2039 2040 __ flush(); 2041 nmethod* nm = nmethod::new_native_nmethod(method, 2042 compile_id, 2043 masm->code(), 2044 vep_offset, 2045 frame_complete, 2046 stack_slots, 2047 in_ByteSize(-1), 2048 in_ByteSize(-1), 2049 oop_maps, 2050 exception_offset); 2051 if (nm == nullptr) return nm; 2052 if (method->is_continuation_enter_intrinsic()) { 2053 ContinuationEntry::set_enter_code(nm, interpreted_entry_offset); 2054 } else if (method->is_continuation_yield_intrinsic()) { 2055 _cont_doYield_stub = nm; 2056 } 2057 return nm; 2058 } 2059 2060 if (method->is_method_handle_intrinsic()) { 2061 vmIntrinsics::ID iid = method->intrinsic_id(); 2062 intptr_t start = (intptr_t)__ pc(); 2063 int vep_offset = ((intptr_t)__ pc()) - start; 2064 gen_special_dispatch(masm, 2065 method, 2066 in_sig_bt, 2067 in_regs); 2068 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period 2069 __ flush(); 2070 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually 2071 return nmethod::new_native_nmethod(method, 2072 compile_id, 2073 masm->code(), 2074 vep_offset, 2075 frame_complete, 2076 stack_slots / VMRegImpl::slots_per_word, 2077 in_ByteSize(-1), 2078 in_ByteSize(-1), 2079 (OopMapSet*)nullptr); 2080 } 2081 2082 address native_func = method->native_function(); 2083 assert(native_func != nullptr, "must have function"); 2084 2085 // First, create signature for outgoing C call 2086 // -------------------------------------------------------------------------- 2087 2088 int total_in_args = method->size_of_parameters(); 2089 // We have received a description of where all the java args are located 2090 // on entry to the wrapper. We need to convert these args to where 2091 // the jni function will expect them. To figure out where they go 2092 // we convert the java signature to a C signature by inserting 2093 // the hidden arguments as arg[0] and possibly arg[1] (static method) 2094 2095 // Calculate the total number of C arguments and create arrays for the 2096 // signature and the outgoing registers. 
2097 // On ppc64, we have two arrays for the outgoing registers, because 2098 // some floating-point arguments must be passed in registers _and_ 2099 // in stack locations. 2100 bool method_is_static = method->is_static(); 2101 int total_c_args = total_in_args + (method_is_static ? 2 : 1); 2102 2103 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 2104 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 2105 2106 // Create the signature for the C call: 2107 // 1) add the JNIEnv* 2108 // 2) add the class if the method is static 2109 // 3) copy the rest of the incoming signature (shifted by the number of 2110 // hidden arguments). 2111 2112 int argc = 0; 2113 out_sig_bt[argc++] = T_ADDRESS; 2114 if (method->is_static()) { 2115 out_sig_bt[argc++] = T_OBJECT; 2116 } 2117 2118 for (int i = 0; i < total_in_args ; i++ ) { 2119 out_sig_bt[argc++] = in_sig_bt[i]; 2120 } 2121 2122 2123 // Compute the wrapper's frame size. 2124 // -------------------------------------------------------------------------- 2125 2126 // Now figure out where the args must be stored and how much stack space 2127 // they require. 2128 // 2129 // Compute framesize for the wrapper. We need to handlize all oops in 2130 // incoming registers. 2131 // 2132 // Calculate the total number of stack slots we will need: 2133 // 1) abi requirements 2134 // 2) outgoing arguments 2135 // 3) space for inbound oop handle area 2136 // 4) space for handlizing a klass if static method 2137 // 5) space for a lock if synchronized method 2138 // 6) workspace for saving return values, int <-> float reg moves, etc. 2139 // 7) alignment 2140 // 2141 // Layout of the native wrapper frame: 2142 // (stack grows upwards, memory grows downwards) 2143 // 2144 // NW [ABI_REG_ARGS] <-- 1) R1_SP 2145 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset 2146 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset 2147 // klass <-- 4) R1_SP + klass_offset 2148 // lock <-- 5) R1_SP + lock_offset 2149 // [workspace] <-- 6) R1_SP + workspace_offset 2150 // [alignment] (optional) <-- 7) 2151 // caller [JIT_TOP_ABI_48] <-- r_callers_sp 2152 // 2153 // - *_slot_offset Indicates offset from SP in number of stack slots. 2154 // - *_offset Indicates offset from SP in bytes. 2155 2156 int stack_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args) + // 1+2) 2157 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention. 2158 2159 // Now the space for the inbound oop handle area. 2160 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word; 2161 2162 int oop_handle_slot_offset = stack_slots; 2163 stack_slots += total_save_slots; // 3) 2164 2165 int klass_slot_offset = 0; 2166 int klass_offset = -1; 2167 if (method_is_static) { // 4) 2168 klass_slot_offset = stack_slots; 2169 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 2170 stack_slots += VMRegImpl::slots_per_word; 2171 } 2172 2173 int lock_slot_offset = 0; 2174 int lock_offset = -1; 2175 if (method->is_synchronized()) { // 5) 2176 lock_slot_offset = stack_slots; 2177 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size; 2178 stack_slots += VMRegImpl::slots_per_word; 2179 } 2180 2181 int workspace_slot_offset = stack_slots; // 6) 2182 stack_slots += 2; 2183 2184 // Now compute actual number of stack words we need. 2185 // Rounding to make stack properly aligned. 
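// Worked example (comment only, assuming frame::alignment_in_bytes == 16 and
// VMRegImpl::stack_slot_size == 4): stack_slots is rounded up to a multiple
// of 16 / 4 == 4 slots, e.g. 45 slots become 48, giving a frame size of
// 48 * 4 == 192 bytes and keeping R1_SP properly aligned.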
2186 stack_slots = align_up(stack_slots, // 7) 2187 frame::alignment_in_bytes / VMRegImpl::stack_slot_size); 2188 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size; 2189 2190 2191 // Now we can start generating code. 2192 // -------------------------------------------------------------------------- 2193 2194 intptr_t start_pc = (intptr_t)__ pc(); 2195 intptr_t vep_start_pc; 2196 intptr_t frame_done_pc; 2197 2198 Label handle_pending_exception; 2199 Label last_java_pc; 2200 2201 Register r_callers_sp = R21; 2202 Register r_temp_1 = R22; 2203 Register r_temp_2 = R23; 2204 Register r_temp_3 = R24; 2205 Register r_temp_4 = R25; 2206 Register r_temp_5 = R26; 2207 Register r_temp_6 = R27; 2208 Register r_last_java_pc = R28; 2209 2210 Register r_carg1_jnienv = noreg; 2211 Register r_carg2_classorobject = noreg; 2212 r_carg1_jnienv = out_regs[0].first()->as_Register(); 2213 r_carg2_classorobject = out_regs[1].first()->as_Register(); 2214 2215 2216 // Generate the Unverified Entry Point (UEP). 2217 // -------------------------------------------------------------------------- 2218 assert(start_pc == (intptr_t)__ pc(), "uep must be at start"); 2219 2220 // Check ic: object class == cached class? 2221 if (!method_is_static) { 2222 __ ic_check(4 /* end_alignment */); 2223 } 2224 2225 // Generate the Verified Entry Point (VEP). 2226 // -------------------------------------------------------------------------- 2227 vep_start_pc = (intptr_t)__ pc(); 2228 2229 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) { 2230 Label L_skip_barrier; 2231 Register klass = r_temp_1; 2232 // Notify OOP recorder (don't need the relocation) 2233 AddressLiteral md = __ constant_metadata_address(method->method_holder()); 2234 __ load_const_optimized(klass, md.value(), R0); 2235 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/); 2236 2237 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0); 2238 __ mtctr(klass); 2239 __ bctr(); 2240 2241 __ bind(L_skip_barrier); 2242 } 2243 2244 __ save_LR(r_temp_1); 2245 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame. 2246 __ mr(r_callers_sp, R1_SP); // Remember frame pointer. 2247 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame. 2248 2249 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2250 bs->nmethod_entry_barrier(masm, r_temp_1); 2251 2252 frame_done_pc = (intptr_t)__ pc(); 2253 2254 // Native nmethod wrappers never take possession of the oop arguments. 2255 // So the caller will gc the arguments. 2256 // The only thing we need an oopMap for is if the call is static. 2257 // 2258 // An OopMap for lock (and class if static), and one for the VM call itself. 2259 OopMapSet *oop_maps = new OopMapSet(); 2260 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2261 2262 // Move arguments from register/stack to register/stack. 2263 // -------------------------------------------------------------------------- 2264 // 2265 // We immediately shuffle the arguments so that for any vm call we have 2266 // to make from here on out (sync slow path, jvmti, etc.) we will have 2267 // captured the oops from our caller and have a valid oopMap for them. 2268 // 2269 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 2270 // (derived from JavaThread* which is in R16_thread) and, if static, 2271 // the class mirror instead of a receiver. 
This pretty much guarantees that 2272 // register layout will not match. We ignore these extra arguments during 2273 // the shuffle. The shuffle is described by the two calling convention 2274 // vectors we have in our possession. We simply walk the java vector to 2275 // get the source locations and the c vector to get the destinations. 2276 2277 // Record sp-based slot for receiver on stack for non-static methods. 2278 int receiver_offset = -1; 2279 2280 // We move the arguments backward because the floating point registers 2281 // destination will always be to a register with a greater or equal 2282 // register number or the stack. 2283 // in is the index of the incoming Java arguments 2284 // out is the index of the outgoing C arguments 2285 2286 #ifdef ASSERT 2287 bool reg_destroyed[Register::number_of_registers]; 2288 bool freg_destroyed[FloatRegister::number_of_registers]; 2289 for (int r = 0 ; r < Register::number_of_registers ; r++) { 2290 reg_destroyed[r] = false; 2291 } 2292 for (int f = 0 ; f < FloatRegister::number_of_registers ; f++) { 2293 freg_destroyed[f] = false; 2294 } 2295 #endif // ASSERT 2296 2297 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) { 2298 2299 #ifdef ASSERT 2300 if (in_regs[in].first()->is_Register()) { 2301 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!"); 2302 } else if (in_regs[in].first()->is_FloatRegister()) { 2303 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!"); 2304 } 2305 if (out_regs[out].first()->is_Register()) { 2306 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true; 2307 } else if (out_regs[out].first()->is_FloatRegister()) { 2308 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true; 2309 } 2310 #endif // ASSERT 2311 2312 switch (in_sig_bt[in]) { 2313 case T_BOOLEAN: 2314 case T_CHAR: 2315 case T_BYTE: 2316 case T_SHORT: 2317 case T_INT: 2318 // Move int and do sign extension. 2319 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2320 break; 2321 case T_LONG: 2322 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2323 break; 2324 case T_ARRAY: 2325 case T_OBJECT: 2326 object_move(masm, stack_slots, 2327 oop_map, oop_handle_slot_offset, 2328 ((in == 0) && (!method_is_static)), &receiver_offset, 2329 in_regs[in], out_regs[out], 2330 r_callers_sp, r_temp_1, r_temp_2); 2331 break; 2332 case T_VOID: 2333 break; 2334 case T_FLOAT: 2335 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2336 break; 2337 case T_DOUBLE: 2338 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2339 break; 2340 case T_ADDRESS: 2341 fatal("found type (T_ADDRESS) in java args"); 2342 break; 2343 default: 2344 ShouldNotReachHere(); 2345 break; 2346 } 2347 } 2348 2349 // Pre-load a static method's oop into ARG2. 2350 // Used both by locking code and the normal JNI call code. 2351 if (method_is_static) { 2352 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), 2353 r_carg2_classorobject); 2354 2355 // Now handlize the static class mirror in carg2. It's known not-null. 2356 __ std(r_carg2_classorobject, klass_offset, R1_SP); 2357 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2358 __ addi(r_carg2_classorobject, R1_SP, klass_offset); 2359 } 2360 2361 // Get JNIEnv* which is first argument to native. 
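// (The JNIEnv lives inside the JavaThread, so its address is formed with a
// plain addi off R16_thread; conceptually
//   env = (JNIEnv*)((char*)thread + jni_environment_offset)
// with no memory load required.)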
2362 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset())); 2363 2364 // NOTE: 2365 // 2366 // We have all of the arguments set up at this point. 2367 // We MUST NOT touch any outgoing regs from this point on. 2368 // So if we must call out we must push a new frame. 2369 2370 // The last Java pc will also be used as resume pc if this is the wrapper for wait0. 2371 // For this purpose the precise location matters but not for oopmap lookup. 2372 __ calculate_address_from_global_toc(r_last_java_pc, last_java_pc, true, true, true, true); 2373 2374 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below. 2375 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register"); 2376 2377 # if 0 2378 // DTrace method entry 2379 # endif 2380 2381 // Lock a synchronized method. 2382 // -------------------------------------------------------------------------- 2383 2384 if (method->is_synchronized()) { 2385 Register r_oop = r_temp_4; 2386 const Register r_box = r_temp_5; 2387 Label done, locked; 2388 2389 // Load the oop for the object or class. r_carg2_classorobject contains 2390 // either the handlized oop from the incoming arguments or the handlized 2391 // class mirror (if the method is static). 2392 __ ld(r_oop, 0, r_carg2_classorobject); 2393 2394 // Get the lock box slot's address. 2395 __ addi(r_box, R1_SP, lock_offset); 2396 2397 // Try fastpath for locking. 2398 if (LockingMode == LM_LIGHTWEIGHT) { 2399 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2400 Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg; 2401 __ compiler_fast_lock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg); 2402 } else { 2403 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2404 __ compiler_fast_lock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2405 } 2406 __ beq(CR0, locked); 2407 2408 // None of the above fast optimizations worked, so we have to get into the 2409 // slow case of monitor enter. Inline a special case of call_VM that 2410 // disallows any pending_exception. 2411 2412 // Save argument registers and leave room for C-compatible ABI_REG_ARGS. 2413 int frame_size = frame::native_abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes); 2414 __ mr(R11_scratch1, R1_SP); 2415 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs); 2416 2417 // Do the call. 2418 __ set_last_Java_frame(R11_scratch1, r_last_java_pc); 2419 assert(r_last_java_pc->is_nonvolatile(), "r_last_java_pc needs to be preserved across complete_monitor_locking_C call"); 2420 // The following call will not be preempted. 2421 // push_cont_fastpath forces freeze slow path in case we try to preempt where we will pin the 2422 // vthread to the carrier (see FreezeBase::recurse_freeze_native_frame()).
2423 __ push_cont_fastpath(); 2424 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread); 2425 __ pop_cont_fastpath(); 2426 __ reset_last_Java_frame(); 2427 2428 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs); 2429 2430 __ asm_assert_mem8_is_zero(thread_(pending_exception), 2431 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C"); 2432 2433 __ bind(locked); 2434 } 2435 2436 __ set_last_Java_frame(R1_SP, r_last_java_pc); 2437 2438 // Publish thread state 2439 // -------------------------------------------------------------------------- 2440 2441 // Transition from _thread_in_Java to _thread_in_native. 2442 __ li(R0, _thread_in_native); 2443 __ release(); 2444 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2445 __ stw(R0, thread_(thread_state)); 2446 2447 2448 // The JNI call 2449 // -------------------------------------------------------------------------- 2450 __ call_c(native_func, relocInfo::runtime_call_type); 2451 2452 2453 // Now, we are back from the native code. 2454 2455 2456 // Unpack the native result. 2457 // -------------------------------------------------------------------------- 2458 2459 // For int-types, we do any needed sign-extension required. 2460 // Care must be taken that the return values (R3_RET and F1_RET) 2461 // will survive any VM calls for blocking or unlocking. 2462 // An OOP result (handle) is done specially in the slow-path code. 2463 2464 switch (ret_type) { 2465 case T_VOID: break; // Nothing to do! 2466 case T_FLOAT: break; // Got it where we want it (unless slow-path). 2467 case T_DOUBLE: break; // Got it where we want it (unless slow-path). 2468 case T_LONG: break; // Got it where we want it (unless slow-path). 2469 case T_OBJECT: break; // Really a handle. 2470 // Cannot de-handlize until after reclaiming jvm_lock. 2471 case T_ARRAY: break; 2472 2473 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1) 2474 __ normalize_bool(R3_RET); 2475 break; 2476 } 2477 case T_BYTE: { // sign extension 2478 __ extsb(R3_RET, R3_RET); 2479 break; 2480 } 2481 case T_CHAR: { // unsigned result 2482 __ andi(R3_RET, R3_RET, 0xffff); 2483 break; 2484 } 2485 case T_SHORT: { // sign extension 2486 __ extsh(R3_RET, R3_RET); 2487 break; 2488 } 2489 case T_INT: // nothing to do 2490 break; 2491 default: 2492 ShouldNotReachHere(); 2493 break; 2494 } 2495 2496 // Publish thread state 2497 // -------------------------------------------------------------------------- 2498 2499 // Switch thread to "native transition" state before reading the 2500 // synchronization state. This additional state is necessary because reading 2501 // and testing the synchronization state is not atomic w.r.t. GC, as this 2502 // scenario demonstrates: 2503 // - Java thread A, in _thread_in_native state, loads _not_synchronized 2504 // and is preempted. 2505 // - VM thread changes sync state to synchronizing and suspends threads 2506 // for GC. 2507 // - Thread A is resumed to finish this native method, but doesn't block 2508 // here since it didn't see any synchronization in progress, and escapes. 2509 2510 // Transition from _thread_in_native to _thread_in_native_trans. 2511 __ li(R0, _thread_in_native_trans); 2512 __ release(); 2513 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2514 __ stw(R0, thread_(thread_state)); 2515 2516 2517 // Must we block? 
2518 // -------------------------------------------------------------------------- 2519 2520 // Block, if necessary, before resuming in _thread_in_Java state. 2521 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2522 { 2523 Label no_block, sync; 2524 2525 // Force this write out before the read below. 2526 if (!UseSystemMemoryBarrier) { 2527 __ fence(); 2528 } 2529 2530 Register sync_state_addr = r_temp_4; 2531 Register sync_state = r_temp_5; 2532 Register suspend_flags = r_temp_6; 2533 2534 // No synchronization in progress nor yet synchronized 2535 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path). 2536 __ safepoint_poll(sync, sync_state, true /* at_return */, false /* in_nmethod */); 2537 2538 // Not suspended. 2539 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size"); 2540 __ lwz(suspend_flags, thread_(suspend_flags)); 2541 __ cmpwi(CR1, suspend_flags, 0); 2542 __ beq(CR1, no_block); 2543 2544 // Block. Save any potential method result value before the operation and 2545 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this 2546 // lets us share the oopMap we used when we went native rather than create 2547 // a distinct one for this pc. 2548 __ bind(sync); 2549 __ isync(); 2550 2551 address entry_point = 2552 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans); 2553 save_native_result(masm, ret_type, workspace_slot_offset); 2554 __ call_VM_leaf(entry_point, R16_thread); 2555 restore_native_result(masm, ret_type, workspace_slot_offset); 2556 2557 __ bind(no_block); 2558 2559 // Publish thread state. 2560 // -------------------------------------------------------------------------- 2561 2562 // Thread state is thread_in_native_trans. Any safepoint blocking has 2563 // already happened so we can now change state to _thread_in_Java. 2564 2565 // Transition from _thread_in_native_trans to _thread_in_Java. 2566 __ li(R0, _thread_in_Java); 2567 __ lwsync(); // Acquire safepoint and suspend state, release thread state. 2568 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2569 __ stw(R0, thread_(thread_state)); 2570 2571 // Check preemption for Object.wait() 2572 if (LockingMode != LM_LEGACY && method->is_object_wait0()) { 2573 Label not_preempted; 2574 __ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); 2575 __ cmpdi(CR0, R0, 0); 2576 __ beq(CR0, not_preempted); 2577 __ mtlr(R0); 2578 __ li(R0, 0); 2579 __ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); 2580 __ blr(); 2581 __ bind(not_preempted); 2582 } 2583 __ bind(last_java_pc); 2584 // We use the same pc/oopMap repeatedly when we call out above. 2585 intptr_t oopmap_pc = (intptr_t) __ pc(); 2586 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); 2587 } 2588 2589 // Reguard any pages if necessary. 
2590 // -------------------------------------------------------------------------- 2591 2592 Label no_reguard; 2593 __ lwz(r_temp_1, thread_(stack_guard_state)); 2594 __ cmpwi(CR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled); 2595 __ bne(CR0, no_reguard); 2596 2597 save_native_result(masm, ret_type, workspace_slot_offset); 2598 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); 2599 restore_native_result(masm, ret_type, workspace_slot_offset); 2600 2601 __ bind(no_reguard); 2602 2603 2604 // Unlock 2605 // -------------------------------------------------------------------------- 2606 2607 if (method->is_synchronized()) { 2608 const Register r_oop = r_temp_4; 2609 const Register r_box = r_temp_5; 2610 const Register r_exception = r_temp_6; 2611 Label done; 2612 2613 // Get oop and address of lock object box. 2614 if (method_is_static) { 2615 assert(klass_offset != -1, ""); 2616 __ ld(r_oop, klass_offset, R1_SP); 2617 } else { 2618 assert(receiver_offset != -1, ""); 2619 __ ld(r_oop, receiver_offset, R1_SP); 2620 } 2621 __ addi(r_box, R1_SP, lock_offset); 2622 2623 // Try fastpath for unlocking. 2624 if (LockingMode == LM_LIGHTWEIGHT) { 2625 __ compiler_fast_unlock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2626 } else { 2627 __ compiler_fast_unlock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2628 } 2629 __ beq(CR0, done); 2630 2631 // Save and restore any potential method result value around the unlocking operation. 2632 save_native_result(masm, ret_type, workspace_slot_offset); 2633 2634 // Must save pending exception around the slow-path VM call. Since it's a 2635 // leaf call, the pending exception (if any) can be kept in a register. 2636 __ ld(r_exception, thread_(pending_exception)); 2637 assert(r_exception->is_nonvolatile(), "exception register must be non-volatile"); 2638 __ li(R0, 0); 2639 __ std(R0, thread_(pending_exception)); 2640 2641 // Slow case of monitor exit. 2642 // Inline a special case of call_VM that disallows any pending_exception. 2643 // Arguments are (oop obj, BasicLock* lock, JavaThread* thread). 2644 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread); 2645 2646 __ asm_assert_mem8_is_zero(thread_(pending_exception), 2647 "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C"); 2648 2649 restore_native_result(masm, ret_type, workspace_slot_offset); 2650 2651 // Check_forward_pending_exception jumps to forward_exception if any pending 2652 // exception is set. The forward_exception routine expects to see the 2653 // exception in pending_exception and not in a register. Kind of clumsy, 2654 // since all folks who branch to forward_exception must have tested 2655 // pending_exception first and hence have it in a register already. 2656 __ std(r_exception, thread_(pending_exception)); 2657 2658 __ bind(done); 2659 } 2660 2661 # if 0 2662 // DTrace method exit 2663 # endif 2664 2665 // Clear "last Java frame" SP and PC. 2666 // -------------------------------------------------------------------------- 2667 2668 // Last Java frame won't be set if we're resuming after preemption 2669 bool maybe_preempted = LockingMode != LM_LEGACY && method->is_object_wait0(); 2670 __ reset_last_Java_frame(!maybe_preempted /* check_last_java_sp */); 2671 2672 // Unbox oop result, e.g. JNIHandles::resolve value.
2673 // -------------------------------------------------------------------------- 2674 2675 if (is_reference_type(ret_type)) { 2676 __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, MacroAssembler::PRESERVATION_NONE); 2677 } 2678 2679 if (CheckJNICalls) { 2680 // clear_pending_jni_exception_check 2681 __ load_const_optimized(R0, 0L); 2682 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread); 2683 } 2684 2685 // Reset handle block. 2686 // -------------------------------------------------------------------------- 2687 __ ld(r_temp_1, thread_(active_handles)); 2688 // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size"); 2689 __ li(r_temp_2, 0); 2690 __ stw(r_temp_2, in_bytes(JNIHandleBlock::top_offset()), r_temp_1); 2691 2692 2693 // Check for pending exceptions. 2694 // -------------------------------------------------------------------------- 2695 __ ld(r_temp_2, thread_(pending_exception)); 2696 __ cmpdi(CR0, r_temp_2, 0); 2697 __ bne(CR0, handle_pending_exception); 2698 2699 // Return 2700 // -------------------------------------------------------------------------- 2701 2702 __ pop_frame(); 2703 __ restore_LR(R11); 2704 __ blr(); 2705 2706 2707 // Handler for pending exceptions (out-of-line). 2708 // -------------------------------------------------------------------------- 2709 // Since this is a native call, we know the proper exception handler 2710 // is the empty function. We just pop this frame and then jump to 2711 // forward_exception_entry. 2712 __ bind(handle_pending_exception); 2713 2714 __ pop_frame(); 2715 __ restore_LR(R11); 2716 __ b64_patchable((address)StubRoutines::forward_exception_entry(), 2717 relocInfo::runtime_call_type); 2718 2719 // Done. 2720 // -------------------------------------------------------------------------- 2721 2722 __ flush(); 2723 2724 nmethod *nm = nmethod::new_native_nmethod(method, 2725 compile_id, 2726 masm->code(), 2727 vep_start_pc-start_pc, 2728 frame_done_pc-start_pc, 2729 stack_slots / VMRegImpl::slots_per_word, 2730 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2731 in_ByteSize(lock_offset), 2732 oop_maps); 2733 2734 return nm; 2735 } 2736 2737 // This function returns the adjust size (in number of words) to a c2i adapter 2738 // activation for use during deoptimization. 2739 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2740 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::frame_alignment_in_words); 2741 } 2742 2743 uint SharedRuntime::in_preserve_stack_slots() { 2744 return frame::jit_in_preserve_size / VMRegImpl::stack_slot_size; 2745 } 2746 2747 uint SharedRuntime::out_preserve_stack_slots() { 2748 #if defined(COMPILER1) || defined(COMPILER2) 2749 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size; 2750 #else 2751 return 0; 2752 #endif 2753 } 2754 2755 VMReg SharedRuntime::thread_register() { 2756 // On PPC virtual threads don't save the JavaThread* in their context (e.g. C1 stub frames). 2757 ShouldNotCallThis(); 2758 return nullptr; 2759 } 2760 2761 #if defined(COMPILER1) || defined(COMPILER2) 2762 // Frame generation for deopt and uncommon trap blobs. 
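// Rough per-frame sketch (comment only) of what push_skeleton_frame() below
// does, in pseudo-C over the UnrollBlock arrays it walks:
//   pc = *pcs++; size = *frame_sizes++;
//   store pc into the current top frame's LR save slot;
//   push_frame(size); --number_of_frames;
// Deoptimization::unpack_frames() later fills in the interpreter state of
// these skeletal frames.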
2763 static void push_skeleton_frame(MacroAssembler* masm, bool deopt, 2764 /* Read */ 2765 Register unroll_block_reg, 2766 /* Update */ 2767 Register frame_sizes_reg, 2768 Register number_of_frames_reg, 2769 Register pcs_reg, 2770 /* Invalidate */ 2771 Register frame_size_reg, 2772 Register pc_reg) { 2773 2774 __ ld(pc_reg, 0, pcs_reg); 2775 __ ld(frame_size_reg, 0, frame_sizes_reg); 2776 __ std(pc_reg, _abi0(lr), R1_SP); 2777 __ push_frame(frame_size_reg, R0/*tmp*/); 2778 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP); 2779 __ addi(number_of_frames_reg, number_of_frames_reg, -1); 2780 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize); 2781 __ addi(pcs_reg, pcs_reg, wordSize); 2782 } 2783 2784 // Loop through the UnrollBlock info and create new frames. 2785 static void push_skeleton_frames(MacroAssembler* masm, bool deopt, 2786 /* read */ 2787 Register unroll_block_reg, 2788 /* invalidate */ 2789 Register frame_sizes_reg, 2790 Register number_of_frames_reg, 2791 Register pcs_reg, 2792 Register frame_size_reg, 2793 Register pc_reg) { 2794 Label loop; 2795 2796 // _number_of_frames is of type int (deoptimization.hpp) 2797 __ lwa(number_of_frames_reg, 2798 in_bytes(Deoptimization::UnrollBlock::number_of_frames_offset()), 2799 unroll_block_reg); 2800 __ ld(pcs_reg, 2801 in_bytes(Deoptimization::UnrollBlock::frame_pcs_offset()), 2802 unroll_block_reg); 2803 __ ld(frame_sizes_reg, 2804 in_bytes(Deoptimization::UnrollBlock::frame_sizes_offset()), 2805 unroll_block_reg); 2806 2807 // stack: (caller_of_deoptee, ...). 2808 2809 // At this point we either have an interpreter frame or a compiled 2810 // frame on top of stack. If it is a compiled frame we push a new c2i 2811 // adapter here 2812 2813 // Memorize top-frame stack-pointer. 2814 __ mr(frame_size_reg/*old_sp*/, R1_SP); 2815 2816 // Resize interpreter top frame OR C2I adapter. 2817 2818 // At this moment, the top frame (which is the caller of the deoptee) is 2819 // an interpreter frame or a newly pushed C2I adapter or an entry frame. 2820 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the 2821 // outgoing arguments. 2822 // 2823 // In order to push the interpreter frame for the deoptee, we need to 2824 // resize the top frame such that we are able to place the deoptee's 2825 // locals in the frame. 2826 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI 2827 // into a valid PARENT_IJAVA_FRAME_ABI. 2828 2829 __ lwa(R11_scratch1, 2830 in_bytes(Deoptimization::UnrollBlock::caller_adjustment_offset()), 2831 unroll_block_reg); 2832 __ neg(R11_scratch1, R11_scratch1); 2833 2834 // R11_scratch1 contains size of locals for frame resizing. 2835 // R12_scratch2 contains top frame's lr. 2836 2837 // Resize frame by complete frame size prevents TOC from being 2838 // overwritten by locals. A more stack space saving way would be 2839 // to copy the TOC to its location in the new abi. 2840 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size); 2841 2842 // now, resize the frame 2843 __ resize_frame(R11_scratch1, pc_reg/*tmp*/); 2844 2845 // In the case where we have resized a c2i frame above, the optional 2846 // alignment below the locals has size 32 (why?). 2847 __ std(R12_scratch2, _abi0(lr), R1_SP); 2848 2849 // Initialize initial_caller_sp. 2850 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP); 2851 2852 #ifdef ASSERT 2853 // Make sure that there is at least one entry in the array. 
2854 __ cmpdi(CR0, number_of_frames_reg, 0); 2855 __ asm_assert_ne("array_size must be > 0"); 2856 #endif 2857 2858 // Now push the new interpreter frames. 2859 // 2860 __ bind(loop); 2861 // Allocate a new frame, fill in the pc. 2862 push_skeleton_frame(masm, deopt, 2863 unroll_block_reg, 2864 frame_sizes_reg, 2865 number_of_frames_reg, 2866 pcs_reg, 2867 frame_size_reg, 2868 pc_reg); 2869 __ cmpdi(CR0, number_of_frames_reg, 0); 2870 __ bne(CR0, loop); 2871 2872 // Get the return address pointing into the frame manager. 2873 __ ld(R0, 0, pcs_reg); 2874 // Store it in the top interpreter frame. 2875 __ std(R0, _abi0(lr), R1_SP); 2876 // Initialize frame_manager_lr of interpreter top frame. 2877 } 2878 #endif 2879 2880 void SharedRuntime::generate_deopt_blob() { 2881 // Allocate space for the code 2882 ResourceMark rm; 2883 // Setup code generation tools 2884 const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id); 2885 CodeBuffer buffer(name, 2048, 1024); 2886 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 2887 Label exec_mode_initialized; 2888 int frame_size_in_words; 2889 OopMap* map = nullptr; 2890 OopMapSet *oop_maps = new OopMapSet(); 2891 2892 // size of ABI112 plus spill slots for R3_RET and F1_RET. 2893 const int frame_size_in_bytes = frame::native_abi_reg_args_spill_size; 2894 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 2895 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info. 2896 2897 const Register exec_mode_reg = R21_tmp1; 2898 2899 const address start = __ pc(); 2900 2901 #if defined(COMPILER1) || defined(COMPILER2) 2902 // -------------------------------------------------------------------------- 2903 // Prolog for non exception case! 2904 2905 // We have been called from the deopt handler of the deoptee. 2906 // 2907 // deoptee: 2908 // ... 2909 // call X 2910 // ... 2911 // deopt_handler: call_deopt_stub 2912 // cur. return pc --> ... 2913 // 2914 // So currently SR_LR points behind the call in the deopt handler. 2915 // We adjust it such that it points to the start of the deopt handler. 2916 // The return_pc has been stored in the frame of the deoptee and 2917 // will replace the address of the deopt_handler in the call 2918 // to Deoptimization::fetch_unroll_info below. 2919 // We can't grab a free register here, because all registers may 2920 // contain live values, so let the RegisterSaver do the adjustment 2921 // of the return pc. 2922 const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size; 2923 2924 // Push the "unpack frame" 2925 // Save everything in sight. 2926 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2927 &first_frame_size_in_bytes, 2928 /*generate_oop_map=*/ true, 2929 return_pc_adjustment_no_exception, 2930 RegisterSaver::return_pc_is_lr); 2931 assert(map != nullptr, "OopMap must have been created"); 2932 2933 __ li(exec_mode_reg, Deoptimization::Unpack_deopt); 2934 // Save exec mode for unpack_frames. 2935 __ b(exec_mode_initialized); 2936 2937 // -------------------------------------------------------------------------- 2938 // Prolog for exception case 2939 2940 // An exception is pending. 2941 // We have been called with a return (interpreter) or a jump (exception blob). 
2942 // 2943 // - R3_ARG1: exception oop 2944 // - R4_ARG2: exception pc 2945 2946 int exception_offset = __ pc() - start; 2947 2948 BLOCK_COMMENT("Prolog for exception case"); 2949 2950 // Store exception oop and pc in thread (location known to GC). 2951 // This is needed since the call to "fetch_unroll_info()" may safepoint. 2952 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2953 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2954 __ std(R4_ARG2, _abi0(lr), R1_SP); 2955 2956 // Vanilla deoptimization with an exception pending in exception_oop. 2957 int exception_in_tls_offset = __ pc() - start; 2958 2959 // Push the "unpack frame". 2960 // Save everything in sight. 2961 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2962 &first_frame_size_in_bytes, 2963 /*generate_oop_map=*/ false, 2964 /*return_pc_adjustment_exception=*/ 0, 2965 RegisterSaver::return_pc_is_pre_saved); 2966 2967 // Deopt during an exception. Save exec mode for unpack_frames. 2968 __ li(exec_mode_reg, Deoptimization::Unpack_exception); 2969 2970 // fall through 2971 2972 int reexecute_offset = 0; 2973 #ifdef COMPILER1 2974 __ b(exec_mode_initialized); 2975 2976 // Reexecute entry, similar to c2 uncommon trap 2977 reexecute_offset = __ pc() - start; 2978 2979 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2980 &first_frame_size_in_bytes, 2981 /*generate_oop_map=*/ false, 2982 /*return_pc_adjustment_reexecute=*/ 0, 2983 RegisterSaver::return_pc_is_pre_saved); 2984 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute); 2985 #endif 2986 2987 // -------------------------------------------------------------------------- 2988 __ BIND(exec_mode_initialized); 2989 2990 const Register unroll_block_reg = R22_tmp2; 2991 2992 // We need to set `last_Java_frame' because `fetch_unroll_info' will 2993 // call `last_Java_frame()'. The value of the pc in the frame is not 2994 // particularly important. It just needs to identify this blob. 2995 __ set_last_Java_frame(R1_SP, noreg); 2996 2997 // With EscapeAnalysis turned on, this call may safepoint! 2998 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg); 2999 address calls_return_pc = __ last_calls_return_pc(); 3000 // Set an oopmap for the call site that describes all our saved registers. 3001 oop_maps->add_gc_map(calls_return_pc - start, map); 3002 3003 __ reset_last_Java_frame(); 3004 // Save the return value. 3005 __ mr(unroll_block_reg, R3_RET); 3006 3007 // Restore only the result registers that have been saved 3008 // by save_volatile_registers(...). 3009 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes); 3010 3011 // reload the exec mode from the UnrollBlock (it might have changed) 3012 __ lwz(exec_mode_reg, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg); 3013 // In excp_deopt_mode, restore and clear exception oop which we 3014 // stored in the thread during exception entry above. The exception 3015 // oop will be the return value of this stub. 
3016 Label skip_restore_excp; 3017 __ cmpdi(CR0, exec_mode_reg, Deoptimization::Unpack_exception); 3018 __ bne(CR0, skip_restore_excp); 3019 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 3020 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 3021 __ li(R0, 0); 3022 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 3023 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 3024 __ BIND(skip_restore_excp); 3025 3026 __ pop_frame(); 3027 3028 // stack: (deoptee, optional i2c, caller of deoptee, ...). 3029 3030 // pop the deoptee's frame 3031 __ pop_frame(); 3032 3033 // stack: (caller_of_deoptee, ...). 3034 3035 // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled. 3036 // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info()) 3037 // and the frame is effectively not resized. 3038 Register caller_sp = R23_tmp3; 3039 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg); 3040 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5); 3041 3042 // Loop through the `UnrollBlock' info and create interpreter frames. 3043 push_skeleton_frames(masm, true/*deopt*/, 3044 unroll_block_reg, 3045 R23_tmp3, 3046 R24_tmp4, 3047 R25_tmp5, 3048 R26_tmp6, 3049 R27_tmp7); 3050 3051 // stack: (skeletal interpreter frame, ..., optional skeletal 3052 // interpreter frame, optional c2i, caller of deoptee, ...). 3053 3054 // push an `unpack_frame' taking care of float / int return values. 3055 __ push_frame(frame_size_in_bytes, R0/*tmp*/); 3056 3057 // stack: (unpack frame, skeletal interpreter frame, ..., optional 3058 // skeletal interpreter frame, optional c2i, caller of deoptee, 3059 // ...). 3060 3061 // Spill live volatile registers since we'll do a call. 3062 __ std( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP); 3063 __ stfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP); 3064 3065 // Let the unpacker layout information in the skeletal frames just 3066 // allocated. 3067 __ calculate_address_from_global_toc(R3_RET, calls_return_pc, true, true, true, true); 3068 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET); 3069 // This is a call to a LEAF method, so no oop map is required. 3070 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 3071 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/); 3072 __ reset_last_Java_frame(); 3073 3074 // Restore the volatiles saved above. 3075 __ ld( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP); 3076 __ lfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP); 3077 3078 // Pop the unpack frame. 3079 __ pop_frame(); 3080 __ restore_LR(R0); 3081 3082 // stack: (top interpreter frame, ..., optional interpreter frame, 3083 // optional c2i, caller of deoptee, ...). 3084 3085 // Initialize R14_state. 3086 __ restore_interpreter_state(R11_scratch1); 3087 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 3088 3089 // Return to the interpreter entry point. 
3090 __ blr(); 3091 __ flush(); 3092 #else // COMPILER2 3093 __ unimplemented("deopt blob needed only with compiler"); 3094 int exception_offset = __ pc() - start; 3095 #endif // COMPILER2 3096 3097 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 3098 reexecute_offset, first_frame_size_in_bytes / wordSize); 3099 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 3100 } 3101 3102 #ifdef COMPILER2 3103 void OptoRuntime::generate_uncommon_trap_blob() { 3104 // Allocate space for the code. 3105 ResourceMark rm; 3106 // Setup code generation tools. 3107 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024); 3108 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 3109 address start = __ pc(); 3110 3111 Register unroll_block_reg = R21_tmp1; 3112 Register klass_index_reg = R22_tmp2; 3113 Register unc_trap_reg = R23_tmp3; 3114 Register r_return_pc = R27_tmp7; 3115 3116 OopMapSet* oop_maps = new OopMapSet(); 3117 int frame_size_in_bytes = frame::native_abi_reg_args_size; 3118 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 3119 3120 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 3121 3122 // Push a dummy `unpack_frame' and call 3123 // `Deoptimization::uncommon_trap' to pack the compiled frame into a 3124 // vframe array and return the `UnrollBlock' information. 3125 3126 // Save LR to compiled frame. 3127 __ save_LR(R11_scratch1); 3128 3129 // Push an "uncommon_trap" frame. 3130 __ push_frame_reg_args(0, R11_scratch1); 3131 3132 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...). 3133 3134 // Set the `unpack_frame' as last_Java_frame. 3135 // `Deoptimization::uncommon_trap' expects it and considers its 3136 // sender frame as the deoptee frame. 3137 // Remember the offset of the instruction whose address will be 3138 // moved to R11_scratch1. 3139 address gc_map_pc = __ pc(); 3140 __ calculate_address_from_global_toc(r_return_pc, gc_map_pc, true, true, true, true); 3141 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc); 3142 3143 __ mr(klass_index_reg, R3); 3144 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap); 3145 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), 3146 R16_thread, klass_index_reg, R5_ARG3); 3147 3148 // Set an oopmap for the call site. 3149 oop_maps->add_gc_map(gc_map_pc - start, map); 3150 3151 __ reset_last_Java_frame(); 3152 3153 // Pop the `unpack frame'. 3154 __ pop_frame(); 3155 3156 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 3157 3158 // Save the return value. 3159 __ mr(unroll_block_reg, R3_RET); 3160 3161 // Pop the uncommon_trap frame. 3162 __ pop_frame(); 3163 3164 // stack: (caller_of_deoptee, ...). 3165 3166 #ifdef ASSERT 3167 __ lwz(R22_tmp2, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg); 3168 __ cmpdi(CR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap); 3169 __ asm_assert_eq("OptoRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap"); 3170 #endif 3171 3172 // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled. 3173 // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info()) 3174 // and the frame is effectively not resized. 
3175 Register caller_sp = R23_tmp3; 3176 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg); 3177 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5); 3178 3179 // Allocate new interpreter frame(s) and possibly a c2i adapter 3180 // frame. 3181 push_skeleton_frames(masm, false/*deopt*/, 3182 unroll_block_reg, 3183 R22_tmp2, 3184 R23_tmp3, 3185 R24_tmp4, 3186 R25_tmp5, 3187 R26_tmp6); 3188 3189 // stack: (skeletal interpreter frame, ..., optional skeletal 3190 // interpreter frame, optional c2i, caller of deoptee, ...). 3191 3192 // Push a dummy `unpack_frame' taking care of float return values. 3193 // Call `Deoptimization::unpack_frames' to layout information in the 3194 // interpreter frames just created. 3195 3196 // Push a simple "unpack frame" here. 3197 __ push_frame_reg_args(0, R11_scratch1); 3198 3199 // stack: (unpack frame, skeletal interpreter frame, ..., optional 3200 // skeletal interpreter frame, optional c2i, caller of deoptee, 3201 // ...). 3202 3203 // Set the "unpack_frame" as last_Java_frame. 3204 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc); 3205 3206 // Indicate it is the uncommon trap case. 3207 __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap); 3208 // Let the unpacker layout information in the skeletal frames just 3209 // allocated. 3210 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 3211 R16_thread, unc_trap_reg); 3212 3213 __ reset_last_Java_frame(); 3214 // Pop the `unpack frame'. 3215 __ pop_frame(); 3216 // Restore LR from top interpreter frame. 3217 __ restore_LR(R11_scratch1); 3218 3219 // stack: (top interpreter frame, ..., optional interpreter frame, 3220 // optional c2i, caller of deoptee, ...). 3221 3222 __ restore_interpreter_state(R11_scratch1); 3223 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 3224 3225 // Return to the interpreter entry point. 3226 __ blr(); 3227 3228 masm->flush(); 3229 3230 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize); 3231 } 3232 #endif // COMPILER2 3233 3234 // Generate a special Compile2Runtime blob that saves all registers, and setup oopmap. 3235 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) { 3236 assert(StubRoutines::forward_exception_entry() != nullptr, 3237 "must be generated before"); 3238 assert(is_polling_page_id(id), "expected a polling page stub id"); 3239 3240 ResourceMark rm; 3241 OopMapSet *oop_maps = new OopMapSet(); 3242 OopMap* map; 3243 3244 // Allocate space for the code. Setup code generation tools. 3245 const char* name = SharedRuntime::stub_name(id); 3246 CodeBuffer buffer(name, 2048, 1024); 3247 MacroAssembler* masm = new MacroAssembler(&buffer); 3248 3249 address start = __ pc(); 3250 int frame_size_in_bytes = 0; 3251 3252 RegisterSaver::ReturnPCLocation return_pc_location; 3253 bool cause_return = (id == SharedStubId::polling_page_return_handler_id); 3254 if (cause_return) { 3255 // Nothing to do here. The frame has already been popped in MachEpilogNode. 3256 // Register LR already contains the return pc. 3257 return_pc_location = RegisterSaver::return_pc_is_pre_saved; 3258 } else { 3259 // Use thread()->saved_exception_pc() as return pc. 3260 return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc; 3261 } 3262 3263 bool save_vectors = (id == SharedStubId::polling_page_vectors_safepoint_handler_id); 3264 3265 // Save registers, fpu state, and flags. 
Set R31 = return pc. 3266 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3267 &frame_size_in_bytes, 3268 /*generate_oop_map=*/ true, 3269 /*return_pc_adjustment=*/0, 3270 return_pc_location, save_vectors); 3271 3272 // The following is basically a call_VM. However, we need the precise 3273 // address of the call in order to generate an oopmap. Hence, we do all the 3274 // work ourselves. 3275 __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg); 3276 3277 // The return address must always be correct so that the frame constructor 3278 // never sees an invalid pc. 3279 3280 // Do the call 3281 __ call_VM_leaf(call_ptr, R16_thread); 3282 address calls_return_pc = __ last_calls_return_pc(); 3283 3284 // Set an oopmap for the call site. This oopmap will map all 3285 // oop-registers and debug-info registers as callee-saved. This 3286 // will allow deoptimization at this safepoint to find all possible 3287 // debug-info recordings, as well as let GC find all oops. 3288 oop_maps->add_gc_map(calls_return_pc - start, map); 3289 3290 Label noException; 3291 3292 // Clear the last Java frame. 3293 __ reset_last_Java_frame(); 3294 3295 BLOCK_COMMENT(" Check pending exception."); 3296 const Register pending_exception = R0; 3297 __ ld(pending_exception, thread_(pending_exception)); 3298 __ cmpdi(CR0, pending_exception, 0); 3299 __ beq(CR0, noException); 3300 3301 // Exception pending 3302 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3303 frame_size_in_bytes, 3304 /*restore_ctr=*/true, save_vectors); 3305 3306 BLOCK_COMMENT(" Jump to forward_exception_entry."); 3307 // Jump to forward_exception_entry, with the issuing PC in LR 3308 // so it looks like the original nmethod called forward_exception_entry. 3309 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3310 3311 // No exception case. 3312 __ BIND(noException); 3313 3314 if (!cause_return) { 3315 Label no_adjust; 3316 // If our stashed return pc was modified by the runtime we avoid touching it 3317 __ ld(R0, frame_size_in_bytes + _abi0(lr), R1_SP); 3318 __ cmpd(CR0, R0, R31); 3319 __ bne(CR0, no_adjust); 3320 3321 // Adjust return pc forward to step over the safepoint poll instruction 3322 __ addi(R31, R31, 4); 3323 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP); 3324 3325 __ bind(no_adjust); 3326 } 3327 3328 // Normal exit, restore registers and exit. 3329 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3330 frame_size_in_bytes, 3331 /*restore_ctr=*/true, save_vectors); 3332 3333 __ blr(); 3334 3335 // Make sure all code is generated 3336 masm->flush(); 3337 3338 // Fill-out other meta info 3339 // CodeBlob frame size is in words. 3340 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize); 3341 } 3342 3343 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss) 3344 // 3345 // Generate a stub that calls into the vm to find out the proper destination 3346 // of a java call. All the argument registers are live at this point 3347 // but since this is generic code we don't know what they are and the caller 3348 // must do any gc of the args. 
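//
// Outline of the code below: the stub saves all argument registers, calls
// `destination' with the current thread and R19_method, and expects the
// resolved code entry point back in R3_RET. If no exception is pending, the
// entry point is moved into CTR (restore_live_registers_and_pop_frame leaves
// CTR untouched when restore_ctr is false), the resolved Method* is loaded
// into R19_method via get_vm_result_2, and the stub branches to the entry
// with bctr. Otherwise the pending exception is forwarded through
// StubRoutines::forward_exception_entry().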
3349 // 3350 RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) { 3351 assert(is_resolve_id(id), "expected a resolve stub id"); 3352 3353 // allocate space for the code 3354 ResourceMark rm; 3355 3356 const char* name = SharedRuntime::stub_name(id); 3357 CodeBuffer buffer(name, 1000, 512); 3358 MacroAssembler* masm = new MacroAssembler(&buffer); 3359 3360 int frame_size_in_bytes; 3361 3362 OopMapSet *oop_maps = new OopMapSet(); 3363 OopMap* map = nullptr; 3364 3365 address start = __ pc(); 3366 3367 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3368 &frame_size_in_bytes, 3369 /*generate_oop_map*/ true, 3370 /*return_pc_adjustment*/ 0, 3371 RegisterSaver::return_pc_is_lr); 3372 3373 // Use noreg as last_Java_pc, the return pc will be reconstructed 3374 // from the physical frame. 3375 __ set_last_Java_frame(/*sp*/R1_SP, noreg); 3376 3377 int frame_complete = __ offset(); 3378 3379 // Pass R19_method as 2nd (optional) argument, used by 3380 // counter_overflow_stub. 3381 __ call_VM_leaf(destination, R16_thread, R19_method); 3382 address calls_return_pc = __ last_calls_return_pc(); 3383 // Set an oopmap for the call site. 3384 // We need this not only for callee-saved registers, but also for volatile 3385 // registers that the compiler might be keeping live across a safepoint. 3386 // Create the oopmap for the call's return pc. 3387 oop_maps->add_gc_map(calls_return_pc - start, map); 3388 3389 // R3_RET contains the address we are going to jump to assuming no exception got installed. 3390 3391 // clear last_Java_sp 3392 __ reset_last_Java_frame(); 3393 3394 // Check for pending exceptions. 3395 BLOCK_COMMENT("Check for pending exceptions."); 3396 Label pending; 3397 __ ld(R11_scratch1, thread_(pending_exception)); 3398 __ cmpdi(CR0, R11_scratch1, 0); 3399 __ bne(CR0, pending); 3400 3401 __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame. 3402 3403 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false); 3404 3405 // Get the returned method. 3406 __ get_vm_result_2(R19_method); 3407 3408 __ bctr(); 3409 3410 3411 // Pending exception after the safepoint. 3412 __ BIND(pending); 3413 3414 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true); 3415 3416 // exception pending => remove activation and forward to exception handler 3417 3418 __ li(R11_scratch1, 0); 3419 __ ld(R3_ARG1, thread_(pending_exception)); 3420 __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread); 3421 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3422 3423 // ------------- 3424 // Make sure all code is generated. 3425 masm->flush(); 3426 3427 // return the blob 3428 // frame_size_words or bytes?? 3429 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize, 3430 oop_maps, true); 3431 } 3432 3433 // Continuation point for throwing of implicit exceptions that are 3434 // not handled in the current activation. Fabricates an exception 3435 // oop and initiates normal exception dispatching in this 3436 // frame. Only callee-saved registers are preserved (through the 3437 // normal register window / RegisterMap handling). 
If the compiler 3438 // needs all registers to be preserved between the fault point and 3439 // the exception handler then it must assume responsibility for that 3440 // in AbstractCompiler::continuation_for_implicit_null_exception or 3441 // continuation_for_implicit_division_by_zero_exception. All other 3442 // implicit exceptions (e.g., NullPointerException or 3443 // AbstractMethodError on entry) are either at call sites or 3444 // otherwise assume that stack unwinding will be initiated, so 3445 // caller saved registers were assumed volatile in the compiler. 3446 // 3447 // Note that we generate only this stub into a RuntimeStub, because 3448 // it needs to be properly traversed and ignored during GC, so we 3449 // change the meaning of the "__" macro within this method. 3450 // 3451 // Note: the routine set_pc_not_at_call_for_caller in 3452 // SharedRuntime.cpp requires that this code be generated into a 3453 // RuntimeStub. 3454 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) { 3455 assert(is_throw_id(id), "expected a throw stub id"); 3456 3457 const char* name = SharedRuntime::stub_name(id); 3458 3459 ResourceMark rm; 3460 const char* timer_msg = "SharedRuntime generate_throw_exception"; 3461 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); 3462 3463 CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0); 3464 MacroAssembler* masm = new MacroAssembler(&code); 3465 3466 OopMapSet* oop_maps = new OopMapSet(); 3467 int frame_size_in_bytes = frame::native_abi_reg_args_size; 3468 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 3469 3470 address start = __ pc(); 3471 3472 __ save_LR(R11_scratch1); 3473 3474 // Push a frame. 3475 __ push_frame_reg_args(0, R11_scratch1); 3476 3477 address frame_complete_pc = __ pc(); 3478 3479 // Note that we always have a runtime stub frame on the top of 3480 // stack by this point. Remember the offset of the instruction 3481 // whose address will be moved to R11_scratch1. 3482 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); 3483 3484 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 3485 3486 __ mr(R3_ARG1, R16_thread); 3487 __ call_c(runtime_entry); 3488 3489 // Set an oopmap for the call site. 3490 oop_maps->add_gc_map((int)(gc_map_pc - start), map); 3491 3492 __ reset_last_Java_frame(); 3493 3494 #ifdef ASSERT 3495 // Make sure that this code is only executed if there is a pending 3496 // exception. 3497 { 3498 Label L; 3499 __ ld(R0, 3500 in_bytes(Thread::pending_exception_offset()), 3501 R16_thread); 3502 __ cmpdi(CR0, R0, 0); 3503 __ bne(CR0, L); 3504 __ stop("SharedRuntime::throw_exception: no pending exception"); 3505 __ bind(L); 3506 } 3507 #endif 3508 3509 // Pop frame. 3510 __ pop_frame(); 3511 3512 __ restore_LR(R11_scratch1); 3513 3514 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry()); 3515 __ mtctr(R11_scratch1); 3516 __ bctr(); 3517 3518 // Create runtime stub with OopMap. 3519 RuntimeStub* stub = 3520 RuntimeStub::new_runtime_stub(name, &code, 3521 /*frame_complete=*/ (int)(frame_complete_pc - start), 3522 frame_size_in_bytes/wordSize, 3523 oop_maps, 3524 false); 3525 return stub; 3526 } 3527 3528 //------------------------------Montgomery multiplication------------------------ 3529 // 3530 3531 // Subtract 0:b from carry:a. Return carry. 
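//
// Roughly equivalent C for the inline assembly below (illustrative only; the
// real work is done in assembly so the CA bit can carry the borrow across
// iterations):
//
//   unsigned long borrow = 0;
//   for (long i = 0; i < len; i++) {
//     unsigned long ai = a[i], bi = b[i];
//     a[i]   = ai - bi - borrow;
//     borrow = (ai < bi) || (ai == bi && borrow);  // did this word underflow?
//   }
//   return carry - borrow;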
3532 static unsigned long 3533 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) { 3534 long i = 0; 3535 unsigned long tmp, tmp2; 3536 __asm__ __volatile__ ( 3537 "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA 3538 "mtctr %[len] \n" 3539 "0: \n" 3540 "ldx %[tmp], %[i], %[a] \n" 3541 "ldx %[tmp2], %[i], %[b] \n" 3542 "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended 3543 "stdx %[tmp], %[i], %[a] \n" 3544 "addi %[i], %[i], 8 \n" 3545 "bdnz 0b \n" 3546 "addme %[tmp], %[carry] \n" // carry + CA - 1 3547 : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2) 3548 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len) 3549 : "ctr", "xer", "memory" 3550 ); 3551 return tmp; 3552 } 3553 3554 // Multiply (unsigned) Long A by Long B, accumulating the double- 3555 // length result into the accumulator formed of T0, T1, and T2. 3556 inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3557 unsigned long hi, lo; 3558 __asm__ __volatile__ ( 3559 "mulld %[lo], %[A], %[B] \n" 3560 "mulhdu %[hi], %[A], %[B] \n" 3561 "addc %[T0], %[T0], %[lo] \n" 3562 "adde %[T1], %[T1], %[hi] \n" 3563 "addze %[T2], %[T2] \n" 3564 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3565 : [A]"r"(A), [B]"r"(B) 3566 : "xer" 3567 ); 3568 } 3569 3570 // As above, but add twice the double-length result into the 3571 // accumulator. 3572 inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3573 unsigned long hi, lo; 3574 __asm__ __volatile__ ( 3575 "mulld %[lo], %[A], %[B] \n" 3576 "mulhdu %[hi], %[A], %[B] \n" 3577 "addc %[T0], %[T0], %[lo] \n" 3578 "adde %[T1], %[T1], %[hi] \n" 3579 "addze %[T2], %[T2] \n" 3580 "addc %[T0], %[T0], %[lo] \n" 3581 "adde %[T1], %[T1], %[hi] \n" 3582 "addze %[T2], %[T2] \n" 3583 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3584 : [A]"r"(A), [B]"r"(B) 3585 : "xer" 3586 ); 3587 } 3588 3589 // Fast Montgomery multiplication. The derivation of the algorithm is 3590 // in "A Cryptographic Library for the Motorola DSP56000, 3591 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237". 3592 static void 3593 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[], 3594 unsigned long m[], unsigned long inv, int len) { 3595 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 3596 int i; 3597 3598 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 3599 3600 for (i = 0; i < len; i++) { 3601 int j; 3602 for (j = 0; j < i; j++) { 3603 MACC(a[j], b[i-j], t0, t1, t2); 3604 MACC(m[j], n[i-j], t0, t1, t2); 3605 } 3606 MACC(a[i], b[0], t0, t1, t2); 3607 m[i] = t0 * inv; 3608 MACC(m[i], n[0], t0, t1, t2); 3609 3610 assert(t0 == 0, "broken Montgomery multiply"); 3611 3612 t0 = t1; t1 = t2; t2 = 0; 3613 } 3614 3615 for (i = len; i < 2*len; i++) { 3616 int j; 3617 for (j = i-len+1; j < len; j++) { 3618 MACC(a[j], b[i-j], t0, t1, t2); 3619 MACC(m[j], n[i-j], t0, t1, t2); 3620 } 3621 m[i-len] = t0; 3622 t0 = t1; t1 = t2; t2 = 0; 3623 } 3624 3625 while (t0) { 3626 t0 = sub(m, n, t0, len); 3627 } 3628 } 3629 3630 // Fast Montgomery squaring. This uses asymptotically 25% fewer 3631 // multiplies so it should be up to 25% faster than Montgomery 3632 // multiplication. However, its loop control is more complex and it 3633 // may actually run slower on some machines. 
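//
// The saving comes from symmetry in the partial products: for output word i,
// the sum over j of a[j]*a[i-j] contains each cross term with j != i-j twice,
// so the loops below compute it once and add it twice via MACC2; only the
// diagonal term a[i/2]^2 (present when i is even) is accumulated once with a
// plain MACC.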
3634 static void 3635 montgomery_square(unsigned long a[], unsigned long n[], 3636 unsigned long m[], unsigned long inv, int len) { 3637 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 3638 int i; 3639 3640 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 3641 3642 for (i = 0; i < len; i++) { 3643 int j; 3644 int end = (i+1)/2; 3645 for (j = 0; j < end; j++) { 3646 MACC2(a[j], a[i-j], t0, t1, t2); 3647 MACC(m[j], n[i-j], t0, t1, t2); 3648 } 3649 if ((i & 1) == 0) { 3650 MACC(a[j], a[j], t0, t1, t2); 3651 } 3652 for (; j < i; j++) { 3653 MACC(m[j], n[i-j], t0, t1, t2); 3654 } 3655 m[i] = t0 * inv; 3656 MACC(m[i], n[0], t0, t1, t2); 3657 3658 assert(t0 == 0, "broken Montgomery square"); 3659 3660 t0 = t1; t1 = t2; t2 = 0; 3661 } 3662 3663 for (i = len; i < 2*len; i++) { 3664 int start = i-len+1; 3665 int end = start + (len - start)/2; 3666 int j; 3667 for (j = start; j < end; j++) { 3668 MACC2(a[j], a[i-j], t0, t1, t2); 3669 MACC(m[j], n[i-j], t0, t1, t2); 3670 } 3671 if ((i & 1) == 0) { 3672 MACC(a[j], a[j], t0, t1, t2); 3673 } 3674 for (; j < len; j++) { 3675 MACC(m[j], n[i-j], t0, t1, t2); 3676 } 3677 m[i-len] = t0; 3678 t0 = t1; t1 = t2; t2 = 0; 3679 } 3680 3681 while (t0) { 3682 t0 = sub(m, n, t0, len); 3683 } 3684 } 3685 3686 // The threshold at which squaring is advantageous was determined 3687 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz. 3688 // Doesn't seem to be relevant for Power8 so we use the same value. 3689 #define MONTGOMERY_SQUARING_THRESHOLD 64 3690 3691 // Copy len longwords from s to d, word-swapping as we go. The 3692 // destination array is reversed. 3693 static void reverse_words(unsigned long *s, unsigned long *d, int len) { 3694 d += len; 3695 while(len-- > 0) { 3696 d--; 3697 unsigned long s_val = *s; 3698 // Swap words in a longword on little endian machines. 3699 #ifdef VM_LITTLE_ENDIAN 3700 s_val = (s_val << 32) | (s_val >> 32); 3701 #endif 3702 *d = s_val; 3703 s++; 3704 } 3705 } 3706 3707 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints, 3708 jint len, jlong inv, 3709 jint *m_ints) { 3710 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls. 3711 assert(len % 2 == 0, "array length in montgomery_multiply must be even"); 3712 int longwords = len/2; 3713 3714 // Make very sure we don't use so much space that the stack might 3715 // overflow. 512 jints corresponds to an 16384-bit integer and 3716 // will use here a total of 8k bytes of stack space. 3717 int divisor = sizeof(unsigned long) * 4; 3718 guarantee(longwords <= 8192 / divisor, "must be"); 3719 int total_allocation = longwords * sizeof (unsigned long) * 4; 3720 unsigned long *scratch = (unsigned long *)alloca(total_allocation); 3721 3722 // Local scratch arrays 3723 unsigned long 3724 *a = scratch + 0 * longwords, 3725 *b = scratch + 1 * longwords, 3726 *n = scratch + 2 * longwords, 3727 *m = scratch + 3 * longwords; 3728 3729 reverse_words((unsigned long *)a_ints, a, longwords); 3730 reverse_words((unsigned long *)b_ints, b, longwords); 3731 reverse_words((unsigned long *)n_ints, n, longwords); 3732 3733 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords); 3734 3735 reverse_words(m, (unsigned long *)m_ints, longwords); 3736 } 3737 3738 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints, 3739 jint len, jlong inv, 3740 jint *m_ints) { 3741 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls. 
3742 assert(len % 2 == 0, "array length in montgomery_square must be even"); 3743 int longwords = len/2; 3744 3745 // Make very sure we don't use so much space that the stack might 3746 // overflow. 512 jints corresponds to an 16384-bit integer and 3747 // will use here a total of 6k bytes of stack space. 3748 int divisor = sizeof(unsigned long) * 3; 3749 guarantee(longwords <= (8192 / divisor), "must be"); 3750 int total_allocation = longwords * sizeof (unsigned long) * 3; 3751 unsigned long *scratch = (unsigned long *)alloca(total_allocation); 3752 3753 // Local scratch arrays 3754 unsigned long 3755 *a = scratch + 0 * longwords, 3756 *n = scratch + 1 * longwords, 3757 *m = scratch + 2 * longwords; 3758 3759 reverse_words((unsigned long *)a_ints, a, longwords); 3760 reverse_words((unsigned long *)n_ints, n, longwords); 3761 3762 if (len >= MONTGOMERY_SQUARING_THRESHOLD) { 3763 ::montgomery_square(a, n, m, (unsigned long)inv, longwords); 3764 } else { 3765 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords); 3766 } 3767 3768 reverse_words(m, (unsigned long *)m_ints, longwords); 3769 } 3770 3771 #if INCLUDE_JFR 3772 3773 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint. 3774 // It returns a jobject handle to the event writer. 3775 // The handle is dereferenced and the return value is the event writer oop. 3776 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { 3777 const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id); 3778 CodeBuffer code(name, 512, 64); 3779 MacroAssembler* masm = new MacroAssembler(&code); 3780 3781 Register tmp1 = R10_ARG8; 3782 Register tmp2 = R9_ARG7; 3783 3784 int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size; 3785 address start = __ pc(); 3786 __ mflr(tmp1); 3787 __ std(tmp1, _abi0(lr), R1_SP); // save return pc 3788 __ push_frame_reg_args(0, tmp1); 3789 int frame_complete = __ pc() - start; 3790 __ set_last_Java_frame(R1_SP, noreg); 3791 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), R16_thread); 3792 address calls_return_pc = __ last_calls_return_pc(); 3793 __ reset_last_Java_frame(); 3794 // The handle is dereferenced through a load barrier. 3795 __ resolve_global_jobject(R3_RET, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE); 3796 __ pop_frame(); 3797 __ ld(tmp1, _abi0(lr), R1_SP); 3798 __ mtlr(tmp1); 3799 __ blr(); 3800 3801 OopMapSet* oop_maps = new OopMapSet(); 3802 OopMap* map = new OopMap(framesize, 0); 3803 oop_maps->add_gc_map(calls_return_pc - start, map); 3804 3805 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) 3806 RuntimeStub::new_runtime_stub(name, &code, frame_complete, 3807 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 3808 oop_maps, false); 3809 return stub; 3810 } 3811 3812 // For c2: call to return a leased buffer. 
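// The frame handling mirrors generate_jfr_write_checkpoint() above; the
// difference is that JfrIntrinsicSupport::return_lease produces no jobject
// result, so no resolve_global_jobject step is needed before popping the frame.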
3813 RuntimeStub* SharedRuntime::generate_jfr_return_lease() { 3814 const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id); 3815 CodeBuffer code(name, 512, 64); 3816 MacroAssembler* masm = new MacroAssembler(&code); 3817 3818 Register tmp1 = R10_ARG8; 3819 Register tmp2 = R9_ARG7; 3820 3821 int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size; 3822 address start = __ pc(); 3823 __ mflr(tmp1); 3824 __ std(tmp1, _abi0(lr), R1_SP); // save return pc 3825 __ push_frame_reg_args(0, tmp1); 3826 int frame_complete = __ pc() - start; 3827 __ set_last_Java_frame(R1_SP, noreg); 3828 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), R16_thread); 3829 address calls_return_pc = __ last_calls_return_pc(); 3830 __ reset_last_Java_frame(); 3831 __ pop_frame(); 3832 __ ld(tmp1, _abi0(lr), R1_SP); 3833 __ mtlr(tmp1); 3834 __ blr(); 3835 3836 OopMapSet* oop_maps = new OopMapSet(); 3837 OopMap* map = new OopMap(framesize, 0); 3838 oop_maps->add_gc_map(calls_return_pc - start, map); 3839 3840 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) 3841 RuntimeStub::new_runtime_stub(name, &code, frame_complete, 3842 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 3843 oop_maps, false); 3844 return stub; 3845 } 3846 3847 #endif // INCLUDE_JFR