/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "frame_ppc.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/ad.hpp"
#include "opto/runtime.hpp"
#endif

#include <alloca.h>

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")


class RegisterSaver {
  // Used for saving volatile registers.
 public:

  // Support different return pc locations.
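  // The return pc may still be in LR, may already have been stored into the
  // caller's frame, or may live in the thread's saved_exception_pc field
  // (see the enum values below).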
  enum ReturnPCLocation {
    return_pc_is_lr,
    return_pc_is_pre_saved,
    return_pc_is_thread_saved_exception_pc
  };

  static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                             int* out_frame_size_in_bytes,
                                                             bool generate_oop_map,
                                                             int return_pc_adjustment,
                                                             ReturnPCLocation return_pc_location,
                                                             bool save_vectors = false);
  static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                   int frame_size_in_bytes,
                                                   bool restore_ctr,
                                                   bool save_vectors = false);

  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
                                                     Register r_temp,
                                                     int frame_size,
                                                     int total_args,
                                                     const VMRegPair *regs, const VMRegPair *regs2 = nullptr);
  static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
                                                       int frame_size,
                                                       int total_args,
                                                       const VMRegPair *regs, const VMRegPair *regs2 = nullptr);

  // During deoptimization only the result registers need to be restored
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);

  // Constants and data structures:

  typedef enum {
    int_reg,
    float_reg,
    special_reg,
    vec_reg
  } RegisterType;

  typedef enum {
    reg_size      = 8,
    half_reg_size = reg_size / 2,
    vec_reg_size  = 16
  } RegisterConstants;

  typedef struct {
    RegisterType reg_type;
    int          reg_num;
    VMReg        vmreg;
  } LiveRegType;
};


#define RegisterSaver_LiveIntReg(regname) \
  { RegisterSaver::int_reg,     regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveFloatReg(regname) \
  { RegisterSaver::float_reg,   regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveSpecialReg(regname) \
  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveVecReg(regname) \
  { RegisterSaver::vec_reg,     regname->encoding(), regname->as_VMReg() }

static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
  // Live registers which get spilled to the stack. Register
  // positions in this array correspond directly to the stack layout.

  //
  // live special registers:
  //
  RegisterSaver_LiveSpecialReg(SR_CTR),
  //
  // live float registers:
  //
  RegisterSaver_LiveFloatReg( F0  ),
  RegisterSaver_LiveFloatReg( F1  ),
  RegisterSaver_LiveFloatReg( F2  ),
  RegisterSaver_LiveFloatReg( F3  ),
  RegisterSaver_LiveFloatReg( F4  ),
  RegisterSaver_LiveFloatReg( F5  ),
  RegisterSaver_LiveFloatReg( F6  ),
  RegisterSaver_LiveFloatReg( F7  ),
  RegisterSaver_LiveFloatReg( F8  ),
  RegisterSaver_LiveFloatReg( F9  ),
  RegisterSaver_LiveFloatReg( F10 ),
  RegisterSaver_LiveFloatReg( F11 ),
  RegisterSaver_LiveFloatReg( F12 ),
  RegisterSaver_LiveFloatReg( F13 ),
  RegisterSaver_LiveFloatReg( F14 ),
  RegisterSaver_LiveFloatReg( F15 ),
  RegisterSaver_LiveFloatReg( F16 ),
  RegisterSaver_LiveFloatReg( F17 ),
  RegisterSaver_LiveFloatReg( F18 ),
  RegisterSaver_LiveFloatReg( F19 ),
  RegisterSaver_LiveFloatReg( F20 ),
  RegisterSaver_LiveFloatReg( F21 ),
  RegisterSaver_LiveFloatReg( F22 ),
  RegisterSaver_LiveFloatReg( F23 ),
  RegisterSaver_LiveFloatReg( F24 ),
  RegisterSaver_LiveFloatReg( F25 ),
  RegisterSaver_LiveFloatReg( F26 ),
  RegisterSaver_LiveFloatReg( F27 ),
  RegisterSaver_LiveFloatReg( F28 ),
  RegisterSaver_LiveFloatReg( F29 ),
  RegisterSaver_LiveFloatReg( F30 ),
  RegisterSaver_LiveFloatReg( F31 ),
  //
  // live integer registers:
  //
  RegisterSaver_LiveIntReg(   R0  ),
  //RegisterSaver_LiveIntReg( R1  ), // stack pointer
  RegisterSaver_LiveIntReg(   R2  ),
  RegisterSaver_LiveIntReg(   R3  ),
  RegisterSaver_LiveIntReg(   R4  ),
  RegisterSaver_LiveIntReg(   R5  ),
  RegisterSaver_LiveIntReg(   R6  ),
  RegisterSaver_LiveIntReg(   R7  ),
  RegisterSaver_LiveIntReg(   R8  ),
  RegisterSaver_LiveIntReg(   R9  ),
  RegisterSaver_LiveIntReg(   R10 ),
  RegisterSaver_LiveIntReg(   R11 ),
  RegisterSaver_LiveIntReg(   R12 ),
  //RegisterSaver_LiveIntReg( R13 ), // system thread id
  RegisterSaver_LiveIntReg(   R14 ),
  RegisterSaver_LiveIntReg(   R15 ),
  RegisterSaver_LiveIntReg(   R16 ),
  RegisterSaver_LiveIntReg(   R17 ),
  RegisterSaver_LiveIntReg(   R18 ),
  RegisterSaver_LiveIntReg(   R19 ),
  RegisterSaver_LiveIntReg(   R20 ),
  RegisterSaver_LiveIntReg(   R21 ),
  RegisterSaver_LiveIntReg(   R22 ),
  RegisterSaver_LiveIntReg(   R23 ),
  RegisterSaver_LiveIntReg(   R24 ),
  RegisterSaver_LiveIntReg(   R25 ),
  RegisterSaver_LiveIntReg(   R26 ),
  RegisterSaver_LiveIntReg(   R27 ),
  RegisterSaver_LiveIntReg(   R28 ),
  RegisterSaver_LiveIntReg(   R29 ),
  RegisterSaver_LiveIntReg(   R30 ),
  RegisterSaver_LiveIntReg(   R31 )  // must be the last register (see save/restore functions below)
};

static const RegisterSaver::LiveRegType RegisterSaver_LiveVecRegs[] = {
  //
  // live vector registers (optional, only these ones are used by C2):
  //
  RegisterSaver_LiveVecReg( VR0  ),
  RegisterSaver_LiveVecReg( VR1  ),
  RegisterSaver_LiveVecReg( VR2  ),
  RegisterSaver_LiveVecReg( VR3  ),
  RegisterSaver_LiveVecReg( VR4  ),
  RegisterSaver_LiveVecReg( VR5  ),
  RegisterSaver_LiveVecReg( VR6  ),
  RegisterSaver_LiveVecReg( VR7  ),
  RegisterSaver_LiveVecReg( VR8  ),
  RegisterSaver_LiveVecReg( VR9  ),
  RegisterSaver_LiveVecReg( VR10 ),
  RegisterSaver_LiveVecReg( VR11 ),
  RegisterSaver_LiveVecReg( VR12 ),
  RegisterSaver_LiveVecReg( VR13 ),
  RegisterSaver_LiveVecReg( VR14 ),
  RegisterSaver_LiveVecReg( VR15 ),
  RegisterSaver_LiveVecReg( VR16 ),
  RegisterSaver_LiveVecReg( VR17 ),
  RegisterSaver_LiveVecReg( VR18 ),
  RegisterSaver_LiveVecReg( VR19 ),
  RegisterSaver_LiveVecReg( VR20 ),
  RegisterSaver_LiveVecReg( VR21 ),
  RegisterSaver_LiveVecReg( VR22 ),
  RegisterSaver_LiveVecReg( VR23 ),
  RegisterSaver_LiveVecReg( VR24 ),
  RegisterSaver_LiveVecReg( VR25 ),
  RegisterSaver_LiveVecReg( VR26 ),
  RegisterSaver_LiveVecReg( VR27 ),
  RegisterSaver_LiveVecReg( VR28 ),
  RegisterSaver_LiveVecReg( VR29 ),
  RegisterSaver_LiveVecReg( VR30 ),
  RegisterSaver_LiveVecReg( VR31 )
};


OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
                                                                   int* out_frame_size_in_bytes,
                                                                   bool generate_oop_map,
                                                                   int return_pc_adjustment,
                                                                   ReturnPCLocation return_pc_location,
                                                                   bool save_vectors) {
  // Push an abi_reg_args-frame and store all registers which may be live.
  // If requested, create an OopMap: Record volatile registers as
  // callee-save values in an OopMap so their save locations will be
  // propagated to the RegisterMap of the caller frame during
  // StackFrameStream construction (needed for deoptimization; see
  // compiledVFrame::create_stack_value).
  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
  // Updated return pc is returned in R31 (if not return_pc_is_pre_saved).

  // calculate frame size
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int vecregstosave_num    = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
                                                   sizeof(RegisterSaver::LiveRegType))
                                                : 0;
  const int register_save_size   = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
  const int frame_size_in_bytes  = align_up(register_save_size, frame::alignment_in_bytes)
                                   + frame::native_abi_reg_args_size;

  *out_frame_size_in_bytes       = frame_size_in_bytes;
  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : nullptr;

  BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");

  // push a new frame
  __ push_frame(frame_size_in_bytes, noreg);

  // Save some registers in the last (non-vector) slots of the new frame so we
  // can use them as scratch regs or to determine the return pc.
  __ std(R31, frame_size_in_bytes -   reg_size - vecregstosave_num * vec_reg_size, R1_SP);
  __ std(R30, frame_size_in_bytes - 2*reg_size - vecregstosave_num * vec_reg_size, R1_SP);

  // save the flags
  // Do the save_LR by hand and adjust the return pc if requested.
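  // R31 receives the return pc from LR or from the thread's saved_exception_pc;
  // for return_pc_is_pre_saved the caller has already stored it.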
  switch (return_pc_location) {
    case return_pc_is_lr:                        __ mflr(R31); break;
    case return_pc_is_pre_saved:                 assert(return_pc_adjustment == 0, "unsupported"); break;
    case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
    default: ShouldNotReachHere();
  }
  if (return_pc_location != return_pc_is_pre_saved) {
    if (return_pc_adjustment != 0) {
      __ addi(R31, R31, return_pc_adjustment);
    }
    __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
  }

  // save all registers (ints and floats)
  int offset = register_save_offset;

  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num < 30) { // We spilled R30-31 right at the beginning.
          __ std(as_Register(reg_num), offset, R1_SP);
        }
        break;
      }
      case RegisterSaver::float_reg: {
        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR.encoding()) {
          __ mfctr(R30);
          __ std(R30, offset, R1_SP);
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
                            RegisterSaver_LiveRegs[i].vmreg);
    }
    offset += reg_size;
  }

  // Note that generate_oop_map in the following loop is only used for the
  // polling_page_vectors_safepoint_handler_blob.
  // The order in which the vector contents are stored depends on Endianness and
  // the utilized instructions (PowerArchitecturePPC64).
  assert(is_aligned(offset, StackAlignmentInBytes), "should be");
  if (PowerArchitecturePPC64 >= 10) {
    assert(is_even(vecregstosave_num), "expectation");
    for (int i = 0; i < vecregstosave_num; i += 2) {
      int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
      assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");

      __ stxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
      // Note: The contents were read in the same order (see loadV16_Power9 node in ppc.ad).
      if (generate_oop_map) {
        map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
                              RegisterSaver_LiveVecRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg);
        map->set_callee_saved(VMRegImpl::stack2reg((offset + vec_reg_size) >> 2),
                              RegisterSaver_LiveVecRegs[i BIG_ENDIAN_ONLY(+1) ].vmreg);
      }
      offset += (2 * vec_reg_size);
    }
  } else {
    for (int i = 0; i < vecregstosave_num; i++) {
      int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;

      if (PowerArchitecturePPC64 >= 9) {
        __ stxv(as_VectorRegister(reg_num)->to_vsr(), offset, R1_SP);
      } else {
        __ li(R31, offset);
        __ stxvd2x(as_VectorRegister(reg_num)->to_vsr(), R31, R1_SP);
      }
      // Note: The contents were read in the same order (see loadV16_Power8 / loadV16_Power9 node in ppc.ad).
      if (generate_oop_map) {
        VMReg vsr = RegisterSaver_LiveVecRegs[i].vmreg;
        map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), vsr);
      }
      offset += vec_reg_size;
    }
  }

  assert(offset == frame_size_in_bytes, "consistency check");

  BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");

  // And we're done.
  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                         int frame_size_in_bytes,
                                                         bool restore_ctr,
                                                         bool save_vectors) {
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int vecregstosave_num    = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
                                                   sizeof(RegisterSaver::LiveRegType))
                                                : 0;
  const int register_save_size   = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;

  const int register_save_offset = frame_size_in_bytes - register_save_size;

  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");

  // restore all registers (ints and floats)
  int offset = register_save_offset;

  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR.encoding()) {
          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
            __ ld(R31, offset, R1_SP);
            __ mtctr(R31);
          }
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  assert(is_aligned(offset, StackAlignmentInBytes), "should be");
  if (PowerArchitecturePPC64 >= 10) {
    for (int i = 0; i < vecregstosave_num; i += 2) {
      int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
      assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");

      __ lxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);

      offset += (2 * vec_reg_size);
    }
  } else {
    for (int i = 0; i < vecregstosave_num; i++) {
      int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;

      if (PowerArchitecturePPC64 >= 9) {
        __ lxv(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
      } else {
        __ li(R31, offset);
        __ lxvd2x(as_VectorRegister(reg_num).to_vsr(), R31, R1_SP);
      }

      offset += vec_reg_size;
    }
  }

  assert(offset == frame_size_in_bytes, "consistency check");

  // restore link and the flags
  __ ld(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
  __ mtlr(R31);

  // restore scratch register's value
  __ ld(R31, frame_size_in_bytes - reg_size - vecregstosave_num * vec_reg_size, R1_SP);

  // pop the frame
  __ addi(R1_SP, R1_SP, frame_size_in_bytes);

  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}

void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
                                                           int frame_size, int total_args, const VMRegPair *regs,
                                                           const VMRegPair *regs2) {
  __ push_frame(frame_size, r_temp);
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ std(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ stfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != nullptr) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (!r_1->is_valid()) {
        assert(!r_2->is_valid(), "");
        continue;
      }
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
}

void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
                                                             int total_args, const VMRegPair *regs,
                                                             const VMRegPair *regs2) {
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ ld(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ lfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != nullptr)
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ ld(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ lfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  __ pop_frame();
}

// Restore the registers that might be holding a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size; // VS registers not relevant here.
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // restore all result registers (ints and floats)
  int offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (as_Register(reg_num) == R3_RET) // int result_reg
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        if (as_FloatRegister(reg_num) == F1_RET) // float result_reg
          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        // Special registers don't hold a result.
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  assert(offset == frame_size_in_bytes, "consistency check");
}

// Is vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8/16 on PPC64.
  assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
  return size > 8;
}

static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}

static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4-bytes higher.
// Register values (up to Register::number_of_registers) are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.

const VMReg java_iarg_reg[8] = {
  R3->as_VMReg(),
  R4->as_VMReg(),
  R5->as_VMReg(),
  R6->as_VMReg(),
  R7->as_VMReg(),
  R8->as_VMReg(),
  R9->as_VMReg(),
  R10->as_VMReg()
};

const VMReg java_farg_reg[13] = {
  F1->as_VMReg(),
  F2->as_VMReg(),
  F3->as_VMReg(),
  F4->as_VMReg(),
  F5->as_VMReg(),
  F6->as_VMReg(),
  F7->as_VMReg(),
  F8->as_VMReg(),
  F9->as_VMReg(),
  F10->as_VMReg(),
  F11->as_VMReg(),
  F12->as_VMReg(),
  F13->as_VMReg()
};

const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);

STATIC_ASSERT(num_java_iarg_registers == Argument::n_int_register_parameters_j);
STATIC_ASSERT(num_java_farg_registers == Argument::n_float_register_parameters_j);

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {
  // C2c calling conventions for compiled-compiled calls.
  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
  // registers _AND_ put the rest on the stack.

  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  int stk  = 0;
  int ireg = 0;
  int freg = 0;

  // We put the first 8 arguments into registers and the rest on the
  // stack, float arguments are already in their argument registers
  // due to c2c calling conventions (see calling_convention).
  for (int i = 0; i < total_args_passed; ++i) {
    switch(sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (ireg < num_java_iarg_registers) {
        // Put int/ptr in register
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put int/ptr on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (ireg < num_java_iarg_registers) {
        // Put long in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put long on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (ireg < num_java_iarg_registers) {
        // Put ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put ptr on stack. Objects must be aligned to 2 slots too,
        // because "64-bit pointers record oop-ishness on 2 aligned
        // adjacent registers." (see OopFlow::build_oop_map).
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_FLOAT:
      if (freg < num_java_farg_registers) {
        // Put float in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < num_java_farg_registers) {
        // Put double in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
    }
  }
  return stk;
}

#if defined(COMPILER1) || defined(COMPILER2)
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {
  // Calling conventions for C runtime calls and calls to JNI native methods.
  //
  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
  // the first 13 flt/dbl's in the first 13 fp regs but additionally
  // copy flt/dbl to the stack if they are beyond the 8th argument.

  const VMReg iarg_reg[8] = {
    R3->as_VMReg(),
    R4->as_VMReg(),
    R5->as_VMReg(),
    R6->as_VMReg(),
    R7->as_VMReg(),
    R8->as_VMReg(),
    R9->as_VMReg(),
    R10->as_VMReg()
  };

  const VMReg farg_reg[13] = {
    F1->as_VMReg(),
    F2->as_VMReg(),
    F3->as_VMReg(),
    F4->as_VMReg(),
    F5->as_VMReg(),
    F6->as_VMReg(),
    F7->as_VMReg(),
    F8->as_VMReg(),
    F9->as_VMReg(),
    F10->as_VMReg(),
    F11->as_VMReg(),
    F12->as_VMReg(),
    F13->as_VMReg()
  };

  // Check calling conventions consistency.
  assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
         sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
         "consistency");

  const int additional_frame_header_slots = ((frame::native_abi_minframe_size - frame::jit_out_preserve_size)
                                             / VMRegImpl::stack_slot_size);
  const int float_offset_in_slots = Argument::float_on_stack_offset_in_bytes_c / VMRegImpl::stack_slot_size;

  VMReg reg;
  int arg  = 0;
  int freg = 0;
  bool stack_used = false;

  for (int i = 0; i < total_args_passed; ++i, ++arg) {
    // Each argument corresponds to a slot in the Parameter Save Area (if not omitted)
    int stk = (arg * 2) + additional_frame_header_slots;

    switch(sig_bt[i]) {
    //
    // If arguments 0-7 are integers, they are passed in integer registers.
    // Argument i is placed in iarg_reg[i].
    //
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      // We must cast ints to longs and use full 64 bit stack slots
      // here. Thus fall through, handle as long.
    case T_LONG:
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      // Oops are already boxed if required (JNI).
      if (arg < Argument::n_int_register_parameters_c) {
        reg = iarg_reg[arg];
      } else {
        reg = VMRegImpl::stack2reg(stk);
        stack_used = true;
      }
      regs[i].set2(reg);
      break;

    //
    // Floats are treated differently from int regs: The first 13 float arguments
    // are passed in registers (not the float args among the first 13 args).
    // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
    // in farg_reg[j] if argument i is the j-th float argument of this call.
    //
    case T_FLOAT:
      if (freg < Argument::n_float_register_parameters_c) {
        // Put float in register ...
        reg = farg_reg[freg];
        ++freg;
      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk + float_offset_in_slots);
        stack_used = true;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < Argument::n_float_register_parameters_c) {
        // Put double in register ...
        reg = farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack.
        reg = VMRegImpl::stack2reg(stk);
        stack_used = true;
      }
      regs[i].set2(reg);
      break;

    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // Return size of the stack frame excluding the jit_out_preserve part in single-word slots.
#if defined(ABI_ELFv2)
  assert(additional_frame_header_slots == 0, "ABIv2 shouldn't use extra slots");
  // ABIv2 allows omitting the Parameter Save Area if the callee's prototype
  // indicates that all parameters can be passed in registers.
  return stack_used ? (arg * 2) : 0;
#else
  // The Parameter Save Area needs to be at least 8 double-word slots for ABIv1.
  // We have to add extra slots because ABIv1 uses a larger header.
  return MAX2(arg, 8) * 2 + additional_frame_header_slots;
#endif
}
#endif // COMPILER1 || COMPILER2

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  Unimplemented();
  return 0;
}

static address gen_c2i_adapter(MacroAssembler *masm,
                               int total_args_passed,
                               int comp_args_on_stack,
                               const BasicType *sig_bt,
                               const VMRegPair *regs,
                               Label& call_interpreter,
                               const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           align_up(total_args_passed * wordSize, frame::alignment_in_bytes);

  // regular (verified) c2i entry point
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CR0, call_interpreter);


  // Patch caller's callsite, method_(code) was not null which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi0(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi0(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);


  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
  __ resize_frame(-adapter_size, R12_scratch2);

  int st_off = adapter_size - wordSize;

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      Register tmp_reg = value_regs[value_regs_index];
      value_regs_index = (value_regs_index + 1) % num_value_regs;
      // The calling convention produces OptoRegs that ignore the out
      // preserve area (JIT's ABI). We must account for it here.
      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lwz(tmp_reg, ld_off, sender_SP);
      } else {
        __ ld(tmp_reg, ld_off, sender_SP);
      }
      // Pretend stack targets were loaded into tmp_reg.
      r_1 = tmp_reg->as_VMReg();
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ stw(r, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // Longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
          st_off -= wordSize;
        }
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      FloatRegister f = r_1->as_FloatRegister();
      if (!r_2->is_valid()) {
        __ stfs(f, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        // One of these should get known junk...
        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
        st_off -= wordSize;
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }

  // Jump to the interpreter just as if interpreter was doing it.

  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);

  // load TOS
  __ addi(R15_esp, R1_SP, st_off);

  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
  __ bctr();

  return c2i_entrypoint;
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Load method's entry-point from method.
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do a i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

  const Register ld_ptr = R15_esp;
  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed*wordSize;

  // Cut-out for having no stack args. Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the value_regs scratch registers.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset -= wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
        ld_offset -= 2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        }
      } else {
        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          ld_offset -= wordSize;
        }
        __ ld(r, ld_offset, ld_ptr);
        ld_offset -= wordSize;
      }

      if (r_1->is_stack()) {
        // Now store value where the compiler expects it
        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;

        if (sig_bt[i] == T_INT || sig_bt[i] == T_FLOAT || sig_bt[i] == T_BOOLEAN ||
            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR || sig_bt[i] == T_BYTE) {
          __ stw(r, st_off, R1_SP);
        } else {
          __ std(r, st_off, R1_SP);
        }
      }
    }
  }

  __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about

  BLOCK_COMMENT("Store method");
  // Store method into thread->callee_target.
  // We might end up in handle_wrong_method if the callee is
  // deoptimized as we race thru here. If that happens we don't want
  // to take a safepoint because the caller frame will look
  // interpreted and arguments are now "compiled" so it is much better
  // to make this transition invisible to the stack walking
  // code. Unfortunately if we try and find the callee by normal means
  // a safepoint is possible. So we stash the desired callee in the
  // thread and the vm will find it there should this case occur.
  __ std(R19_method, thread_(callee_target));

  // Jump to the compiled code just as if compiled code was doing it.
  __ bctr();
}

void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                            int total_args_passed,
                                            int comp_args_on_stack,
                                            const BasicType *sig_bt,
                                            const VMRegPair *regs,
                                            AdapterHandlerEntry* handler) {
  address i2c_entry;
  address c2i_unverified_entry;
  address c2i_entry;


  // entry: i2c

  __ align(CodeEntryAlignment);
  i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // entry: c2i unverified

  __ align(CodeEntryAlignment);
  BLOCK_COMMENT("c2i unverified entry");
  c2i_unverified_entry = __ pc();

  // inline_cache contains a CompiledICData
  const Register ic             = R19_inline_cache_reg;
  const Register ic_klass       = R11_scratch1;
  const Register receiver_klass = R12_scratch2;
  const Register code           = R21_tmp1;
  const Register ientry         = R23_tmp3;

  assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
  assert(R11_scratch1 == R11, "need prologue scratch register");

  Label call_interpreter;

  __ ic_check(4 /* end_alignment */);
  __ ld(R19_method, CompiledICData::speculated_method_offset(), ic);
  // Argument is valid and klass is as expected, continue.

  __ ld(code, method_(code));
  __ cmpdi(CR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq_predict_taken(CR0, call_interpreter);

  // Branch to ic_miss_stub.
  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);

  // entry: c2i

  c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = nullptr;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
      __ andi_(R0, R0, JVM_ACC_STATIC);
      __ beq(CR0, L_skip_barrier); // non-static
    }

    Register klass = R11_scratch1;
    __ load_method_holder(klass, R19_method);
    __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);

    __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
    __ mtctr(klass);
    __ bctr();

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);

  handler->set_entry_points(i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
  return;
}

// An oop arg. Must pass a handle not the oop itself.
static void object_move(MacroAssembler* masm,
                        int frame_size_in_slots,
                        OopMap* oop_map, int oop_handle_offset,
                        bool is_receiver, int* receiver_offset,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
  assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
         "receiver has already been moved");

  // We must pass a handle. First figure out the location we use as a handle.
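  // The handle is simply the address of the stack slot holding the oop;
  // a null oop is passed as a null handle (see the cmpdi/li sequences below).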

  if (src.first()->is_stack()) {
    // stack to stack or reg

    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    Label skip;
    const int oop_slot_in_callers_frame = reg2slot(src.first());

    guarantee(!is_receiver, "expecting receiver in register");
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));

    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
    __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
    __ cmpdi(CR0, r_temp_2, 0);
    __ bne(CR0, skip);
    // Use a null handle if oop is null.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // stack to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      // Nothing to do, r_handle is already the dst register.
    }
  } else {
    // reg to stack or reg
    const Register r_oop    = src.first()->as_Register();
    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    const int oop_slot = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
                         + oop_handle_offset; // in slots
    const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
    Label skip;

    if (is_receiver) {
      *receiver_offset = oop_offset;
    }
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));

    __ std( r_oop,    oop_offset, R1_SP);
    __ addi(r_handle, R1_SP, oop_offset);

    __ cmpdi(CR0, r_oop, 0);
    __ bne(CR0, skip);
    // Use a null handle if oop is null.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // reg to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // reg to reg
      // Nothing to do, r_handle is already the dst register.
    }
  }
}

static void int_move(MacroAssembler* masm,
                     VMRegPair src, VMRegPair dst,
                     Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid(), "incoming must be int");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ extsw(r_temp, src.first()->as_Register());
    __ std(r_temp, reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    __ extsw(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void long_move(MacroAssembler* masm,
                      VMRegPair src, VMRegPair dst,
                      Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_Register() != src.first()->as_Register())
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void float_move(MacroAssembler* masm,
                       VMRegPair src, VMRegPair dst,
                       Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
  assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
      __ stw(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

static void double_move(MacroAssembler* masm,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ stw (R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ std (R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ lwz(R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ ld (R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = R19_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (is_reference_type(sig_bt[i])) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, reg2offset(r), R1_SP);
          __ verify_oop(temp_reg, FILE_AND_LINE);
        } else {
          __ verify_oop(r->as_Register(), FILE_AND_LINE);
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = R19_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
    member_reg = R19_method;  // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note: This assumes that compiled calling conventions always
      // pass the receiver oop in a register. If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = R11_scratch1;  // TODO (hs24): is R11_scratch1 really free at this point?
      __ ld(receiver_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

//---------------------------- continuation_enter_setup ---------------------------
//
// Frame setup.
//
// Arguments:
//   None.
//
// Results:
//   R1_SP: pointer to blank ContinuationEntry in the pushed frame.
//
// Kills:
//   R0, R20
//
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& framesize_words) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  const int frame_size_in_bytes = (int)ContinuationEntry::size();
  assert(is_aligned(frame_size_in_bytes, frame::alignment_in_bytes), "alignment error");

  framesize_words = frame_size_in_bytes / wordSize;

  DEBUG_ONLY(__ block_comment("setup {"));
  // Save return pc and push entry frame
  const Register return_pc = R20;
  __ mflr(return_pc);
  __ std(return_pc, _abi0(lr), R1_SP);     // SP->lr = return_pc
  __ push_frame(frame_size_in_bytes, R0);  // SP -= frame_size_in_bytes

  OopMap* map = new OopMap((int)frame_size_in_bytes / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);

  __ ld_ptr(R0, JavaThread::cont_entry_offset(), R16_thread);
  __ st_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
  __ st_ptr(R0, ContinuationEntry::parent_offset(), R1_SP);
  DEBUG_ONLY(__ block_comment("} setup"));

  return map;
}

//---------------------------- fill_continuation_entry ---------------------------
//
// Initialize the new ContinuationEntry.
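// The entry records the continuation oop and flags and saves the thread's
// cont_fastpath and held monitor count as the parent values, which are then
// cleared on the thread (and restored again in continuation_enter_cleanup).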
1633 // 1634 // Arguments: 1635 // R1_SP: pointer to blank Continuation entry 1636 // reg_cont_obj: pointer to the continuation 1637 // reg_flags: flags 1638 // 1639 // Results: 1640 // R1_SP: pointer to filled out ContinuationEntry 1641 // 1642 // Kills: 1643 // R8_ARG6, R9_ARG7, R10_ARG8 1644 // 1645 static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, Register reg_flags) { 1646 assert_different_registers(reg_cont_obj, reg_flags); 1647 Register zero = R8_ARG6; 1648 Register tmp2 = R9_ARG7; 1649 Register tmp3 = R10_ARG8; 1650 1651 DEBUG_ONLY(__ block_comment("fill {")); 1652 #ifdef ASSERT 1653 __ load_const_optimized(tmp2, ContinuationEntry::cookie_value()); 1654 __ stw(tmp2, in_bytes(ContinuationEntry::cookie_offset()), R1_SP); 1655 #endif //ASSERT 1656 1657 __ li(zero, 0); 1658 __ st_ptr(reg_cont_obj, ContinuationEntry::cont_offset(), R1_SP); 1659 __ stw(reg_flags, in_bytes(ContinuationEntry::flags_offset()), R1_SP); 1660 __ st_ptr(zero, ContinuationEntry::chunk_offset(), R1_SP); 1661 __ stw(zero, in_bytes(ContinuationEntry::argsize_offset()), R1_SP); 1662 __ stw(zero, in_bytes(ContinuationEntry::pin_count_offset()), R1_SP); 1663 1664 __ ld_ptr(tmp2, JavaThread::cont_fastpath_offset(), R16_thread); 1665 __ ld(tmp3, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread); 1666 __ st_ptr(tmp2, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP); 1667 __ std(tmp3, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP); 1668 1669 __ st_ptr(zero, JavaThread::cont_fastpath_offset(), R16_thread); 1670 __ std(zero, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread); 1671 DEBUG_ONLY(__ block_comment("} fill")); 1672 } 1673 1674 //---------------------------- continuation_enter_cleanup --------------------------- 1675 // 1676 // Copy corresponding attributes from the top ContinuationEntry to the JavaThread 1677 // before deleting it. 1678 // 1679 // Arguments: 1680 // R1_SP: pointer to the ContinuationEntry 1681 // 1682 // Results: 1683 // None. 1684 // 1685 // Kills: 1686 // R8_ARG6, R9_ARG7, R10_ARG8, R15_esp 1687 // 1688 static void continuation_enter_cleanup(MacroAssembler* masm) { 1689 Register tmp1 = R8_ARG6; 1690 Register tmp2 = R9_ARG7; 1691 Register tmp3 = R10_ARG8; 1692 1693 #ifdef ASSERT 1694 __ block_comment("clean {"); 1695 __ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread); 1696 __ cmpd(CR0, R1_SP, tmp1); 1697 __ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP"); 1698 #endif 1699 1700 __ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP); 1701 __ st_ptr(tmp1, JavaThread::cont_fastpath_offset(), R16_thread); 1702 1703 if (CheckJNICalls) { 1704 // Check if this is a virtual thread continuation 1705 Label L_skip_vthread_code; 1706 __ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP); 1707 __ cmpwi(CR0, R0, 0); 1708 __ beq(CR0, L_skip_vthread_code); 1709 1710 // If the held monitor count is > 0 and this vthread is terminating then 1711 // it failed to release a JNI monitor. So we issue the same log message 1712 // that JavaThread::exit does. 
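// (JavaThread::jni_monitor_count tracks monitors that were acquired through JNI MonitorEnter
// and have not yet been released through JNI MonitorExit.)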
1713     __ ld(R0, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
1714     __ cmpdi(CR0, R0, 0);
1715     __ beq(CR0, L_skip_vthread_code);
1716
1717     // Save return value potentially containing the exception oop
1718     Register ex_oop = R15_esp; // nonvolatile register
1719     __ mr(ex_oop, R3_RET);
1720     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
1721     // Restore potential return value
1722     __ mr(R3_RET, ex_oop);
1723
1724     // For vthreads we have to explicitly zero the JNI monitor count of the carrier
1725     // on termination. The held count is implicitly zeroed below when we restore from
1726     // the parent held count (which has to be zero).
1727     __ li(tmp1, 0);
1728     __ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
1729
1730     __ bind(L_skip_vthread_code);
1731   }
1732 #ifdef ASSERT
1733   else {
1734     // Check if this is a virtual thread continuation
1735     Label L_skip_vthread_code;
1736     __ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
1737     __ cmpwi(CR0, R0, 0);
1738     __ beq(CR0, L_skip_vthread_code);
1739
1740     // See comment just above. If not checking JNI calls the JNI count is only
1741     // needed for assertion checking.
1742     __ li(tmp1, 0);
1743     __ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
1744
1745     __ bind(L_skip_vthread_code);
1746   }
1747 #endif
1748
1749   __ ld(tmp2, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP);
1750   __ ld_ptr(tmp3, ContinuationEntry::parent_offset(), R1_SP);
1751   __ std(tmp2, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
1752   __ st_ptr(tmp3, JavaThread::cont_entry_offset(), R16_thread);
1753   DEBUG_ONLY(__ block_comment("} clean"));
1754 }
1755
1756 static void check_continuation_enter_argument(VMReg actual_vmreg,
1757                                               Register expected_reg,
1758                                               const char* name) {
1759   assert(!actual_vmreg->is_stack(), "%s cannot be on stack", name);
1760   assert(actual_vmreg->as_Register() == expected_reg,
1761          "%s is in unexpected register: %s instead of %s",
1762          name, actual_vmreg->as_Register()->name(), expected_reg->name());
1763 }
1764
1765 static void gen_continuation_enter(MacroAssembler* masm,
1766                                    const VMRegPair* regs,
1767                                    int& exception_offset,
1768                                    OopMapSet* oop_maps,
1769                                    int& frame_complete,
1770                                    int& framesize_words,
1771                                    int& interpreted_entry_offset,
1772                                    int& compiled_entry_offset) {
1773
1774   // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1775   int pos_cont_obj = 0;
1776   int pos_is_cont = 1;
1777   int pos_is_virtual = 2;
1778
1779   // The platform-specific calling convention may present the arguments in various registers.
1780   // To simplify the rest of the code, we expect the arguments to reside at these known
1781   // registers, and we additionally check the placement here in case the calling convention ever
1782   // changes.
1783 Register reg_cont_obj = R3_ARG1; 1784 Register reg_is_cont = R4_ARG2; 1785 Register reg_is_virtual = R5_ARG3; 1786 1787 check_continuation_enter_argument(regs[pos_cont_obj].first(), reg_cont_obj, "Continuation object"); 1788 check_continuation_enter_argument(regs[pos_is_cont].first(), reg_is_cont, "isContinue"); 1789 check_continuation_enter_argument(regs[pos_is_virtual].first(), reg_is_virtual, "isVirtualThread"); 1790 1791 address resolve_static_call = SharedRuntime::get_resolve_static_call_stub(); 1792 1793 address start = __ pc(); 1794 1795 Label L_thaw, L_exit; 1796 1797 // i2i entry used at interp_only_mode only 1798 interpreted_entry_offset = __ pc() - start; 1799 { 1800 #ifdef ASSERT 1801 Label is_interp_only; 1802 __ lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread); 1803 __ cmpwi(CR0, R0, 0); 1804 __ bne(CR0, is_interp_only); 1805 __ stop("enterSpecial interpreter entry called when not in interp_only_mode"); 1806 __ bind(is_interp_only); 1807 #endif 1808 1809 // Read interpreter arguments into registers (this is an ad-hoc i2c adapter) 1810 __ ld(reg_cont_obj, Interpreter::stackElementSize*3, R15_esp); 1811 __ lwz(reg_is_cont, Interpreter::stackElementSize*2, R15_esp); 1812 __ lwz(reg_is_virtual, Interpreter::stackElementSize*1, R15_esp); 1813 1814 __ push_cont_fastpath(); 1815 1816 OopMap* map = continuation_enter_setup(masm, framesize_words); 1817 1818 // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe, 1819 // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway. 1820 1821 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual); 1822 1823 // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue) 1824 __ cmpwi(CR0, reg_is_cont, 0); 1825 __ bne(CR0, L_thaw); 1826 1827 // --- call Continuation.enter(Continuation c, boolean isContinue) 1828 1829 // Emit compiled static call. The call will be always resolved to the c2i 1830 // entry of Continuation.enter(Continuation c, boolean isContinue). 1831 // There are special cases in SharedRuntime::resolve_static_call_C() and 1832 // SharedRuntime::resolve_sub_helper_internal() to achieve this 1833 // See also corresponding call below. 1834 address c2i_call_pc = __ pc(); 1835 int start_offset = __ offset(); 1836 // Put the entry point as a constant into the constant pool. 1837 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none); 1838 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr); 1839 guarantee(entry_point_toc_addr != nullptr, "const section overflow"); 1840 1841 // Emit the trampoline stub which will be related to the branch-and-link below. 1842 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset); 1843 guarantee(stub != nullptr, "no space for trampoline stub"); 1844 1845 __ relocate(relocInfo::static_call_type); 1846 // Note: At this point we do not have the address of the trampoline 1847 // stub, and the entry point might be too far away for bl, so __ pc() 1848 // serves as dummy and the bl will be patched later. 
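// (A bl on PPC64 can only reach targets within +/- 32 MB of the call site; if the resolved
// entry is farther away, the patched bl is redirected to the trampoline stub emitted above,
// which loads the real destination from the TOC/constant pool and branches to it via CTR.)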
1849 __ bl(__ pc()); 1850 oop_maps->add_gc_map(__ pc() - start, map); 1851 __ post_call_nop(); 1852 1853 __ b(L_exit); 1854 1855 // static stub for the call above 1856 stub = CompiledDirectCall::emit_to_interp_stub(masm, c2i_call_pc); 1857 guarantee(stub != nullptr, "no space for static stub"); 1858 } 1859 1860 // compiled entry 1861 __ align(CodeEntryAlignment); 1862 compiled_entry_offset = __ pc() - start; 1863 1864 OopMap* map = continuation_enter_setup(masm, framesize_words); 1865 1866 // Frame is now completed as far as size and linkage. 1867 frame_complete =__ pc() - start; 1868 1869 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual); 1870 1871 // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue) 1872 __ cmpwi(CR0, reg_is_cont, 0); 1873 __ bne(CR0, L_thaw); 1874 1875 // --- call Continuation.enter(Continuation c, boolean isContinue) 1876 1877 // Emit compiled static call 1878 // The call needs to be resolved. There's a special case for this in 1879 // SharedRuntime::find_callee_info_helper() which calls 1880 // LinkResolver::resolve_continuation_enter() which resolves the call to 1881 // Continuation.enter(Continuation c, boolean isContinue). 1882 address call_pc = __ pc(); 1883 int start_offset = __ offset(); 1884 // Put the entry point as a constant into the constant pool. 1885 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none); 1886 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr); 1887 guarantee(entry_point_toc_addr != nullptr, "const section overflow"); 1888 1889 // Emit the trampoline stub which will be related to the branch-and-link below. 1890 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset); 1891 guarantee(stub != nullptr, "no space for trampoline stub"); 1892 1893 __ relocate(relocInfo::static_call_type); 1894 // Note: At this point we do not have the address of the trampoline 1895 // stub, and the entry point might be too far away for bl, so __ pc() 1896 // serves as dummy and the bl will be patched later. 
1897 __ bl(__ pc()); 1898 oop_maps->add_gc_map(__ pc() - start, map); 1899 __ post_call_nop(); 1900 1901 __ b(L_exit); 1902 1903 // --- Thawing path 1904 1905 __ bind(L_thaw); 1906 ContinuationEntry::_thaw_call_pc_offset = __ pc() - start; 1907 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(StubRoutines::cont_thaw())); 1908 __ mtctr(R0); 1909 __ bctrl(); 1910 oop_maps->add_gc_map(__ pc() - start, map->deep_copy()); 1911 ContinuationEntry::_return_pc_offset = __ pc() - start; 1912 __ post_call_nop(); 1913 1914 // --- Normal exit (resolve/thawing) 1915 1916 __ bind(L_exit); 1917 ContinuationEntry::_cleanup_offset = __ pc() - start; 1918 continuation_enter_cleanup(masm); 1919 1920 // Pop frame and return 1921 DEBUG_ONLY(__ ld_ptr(R0, 0, R1_SP)); 1922 __ addi(R1_SP, R1_SP, framesize_words*wordSize); 1923 DEBUG_ONLY(__ cmpd(CR0, R0, R1_SP)); 1924 __ asm_assert_eq(FILE_AND_LINE ": inconsistent frame size"); 1925 __ ld(R0, _abi0(lr), R1_SP); // Return pc 1926 __ mtlr(R0); 1927 __ blr(); 1928 1929 // --- Exception handling path 1930 1931 exception_offset = __ pc() - start; 1932 1933 continuation_enter_cleanup(masm); 1934 Register ex_pc = R17_tos; // nonvolatile register 1935 Register ex_oop = R15_esp; // nonvolatile register 1936 __ ld(ex_pc, _abi0(callers_sp), R1_SP); // Load caller's return pc 1937 __ ld(ex_pc, _abi0(lr), ex_pc); 1938 __ mr(ex_oop, R3_RET); // save return value containing the exception oop 1939 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, ex_pc); 1940 __ mtlr(R3_RET); // the exception handler 1941 __ ld(R1_SP, _abi0(callers_sp), R1_SP); // remove enterSpecial frame 1942 1943 // Continue at exception handler 1944 // See OptoRuntime::generate_exception_blob for register arguments 1945 __ mr(R3_ARG1, ex_oop); // pass exception oop 1946 __ mr(R4_ARG2, ex_pc); // pass exception pc 1947 __ blr(); 1948 1949 // static stub for the call above 1950 stub = CompiledDirectCall::emit_to_interp_stub(masm, call_pc); 1951 guarantee(stub != nullptr, "no space for static stub"); 1952 } 1953 1954 static void gen_continuation_yield(MacroAssembler* masm, 1955 const VMRegPair* regs, 1956 OopMapSet* oop_maps, 1957 int& frame_complete, 1958 int& framesize_words, 1959 int& compiled_entry_offset) { 1960 Register tmp = R10_ARG8; 1961 1962 const int framesize_bytes = (int)align_up((int)frame::native_abi_reg_args_size, frame::alignment_in_bytes); 1963 framesize_words = framesize_bytes / wordSize; 1964 1965 address start = __ pc(); 1966 compiled_entry_offset = __ pc() - start; 1967 1968 // Save return pc and push entry frame 1969 __ mflr(tmp); 1970 __ std(tmp, _abi0(lr), R1_SP); // SP->lr = return_pc 1971 __ push_frame(framesize_bytes , R0); // SP -= frame_size_in_bytes 1972 1973 DEBUG_ONLY(__ block_comment("Frame Complete")); 1974 frame_complete = __ pc() - start; 1975 address last_java_pc = __ pc(); 1976 1977 // This nop must be exactly at the PC we push into the frame info. 1978 // We use this nop for fast CodeBlob lookup, associate the OopMap 1979 // with it right away. 
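// (last_java_pc, captured just above, is the address of this nop; a stack walk while the thread
// is in the freeze call below therefore resolves to this blob and finds the OopMap registered here.)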
1980   __ post_call_nop();
1981   OopMap* map = new OopMap(framesize_bytes / VMRegImpl::stack_slot_size, 1);
1982   oop_maps->add_gc_map(last_java_pc - start, map);
1983
1984   __ calculate_address_from_global_toc(tmp, last_java_pc); // will be relocated
1985   __ set_last_Java_frame(R1_SP, tmp);
1986   __ call_VM_leaf(Continuation::freeze_entry(), R16_thread, R1_SP);
1987   __ reset_last_Java_frame();
1988
1989   Label L_pinned;
1990
1991   __ cmpwi(CR0, R3_RET, 0);
1992   __ bne(CR0, L_pinned);
1993
1994   // yield succeeded
1995
1996   // Pop frames of continuation including this stub's frame
1997   __ ld_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
1998   // The frame pushed by gen_continuation_enter is on top now again
1999   continuation_enter_cleanup(masm);
2000
2001   // Pop frame and return
2002   Label L_return;
2003   __ bind(L_return);
2004   __ pop_frame();
2005   __ ld(R0, _abi0(lr), R1_SP); // Return pc
2006   __ mtlr(R0);
2007   __ blr();
2008
2009   // yield failed - continuation is pinned
2010
2011   __ bind(L_pinned);
2012
2013   // handle pending exception thrown by freeze
2014   __ ld(tmp, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
2015   __ cmpdi(CR0, tmp, 0);
2016   __ beq(CR0, L_return); // return if no exception is pending
2017   __ pop_frame();
2018   __ ld(R0, _abi0(lr), R1_SP); // Return pc
2019   __ mtlr(R0);
2020   __ load_const_optimized(tmp, StubRoutines::forward_exception_entry(), R0);
2021   __ mtctr(tmp);
2022   __ bctr();
2023 }
2024
2025 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
2026   ::continuation_enter_cleanup(masm);
2027 }
2028
2029 // ---------------------------------------------------------------------------
2030 // Generate a native wrapper for a given method. The method takes arguments
2031 // in the Java compiled code convention, marshals them to the native
2032 // convention (handlizes oops, etc), transitions to native, makes the call,
2033 // returns to java state (possibly blocking), unhandlizes any result and
2034 // returns.
2035 //
2036 // Critical native functions are a shorthand for the use of
2037 // GetPrimitiveArrayCritical and disallow the use of any other JNI
2038 // functions. The wrapper is expected to unpack the arguments before
2039 // passing them to the callee. Critical native functions leave the state _in_Java,
2040 // since they cannot stop for GC.
2041 // Some other parts of JNI setup are skipped, like the tear down of the JNI handle
2042 // block and the check for pending exceptions, since it's impossible for them
2043 // to be thrown.
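//
// The generated wrapper, in order: checks the inline cache (and the class-init barrier if
// required), builds its frame, shuffles the incoming Java arguments into the C calling
// convention (handlizing oops into the frame's oop-handle area), optionally locks the
// receiver or class mirror, transitions the thread to _thread_in_native, makes the JNI call,
// transitions back (blocking at a safepoint if necessary), unlocks, unhandlizes an oop
// result and returns.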
2044 // 2045 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, 2046 const methodHandle& method, 2047 int compile_id, 2048 BasicType *in_sig_bt, 2049 VMRegPair *in_regs, 2050 BasicType ret_type) { 2051 if (method->is_continuation_native_intrinsic()) { 2052 int exception_offset = -1; 2053 OopMapSet* oop_maps = new OopMapSet(); 2054 int frame_complete = -1; 2055 int stack_slots = -1; 2056 int interpreted_entry_offset = -1; 2057 int vep_offset = -1; 2058 if (method->is_continuation_enter_intrinsic()) { 2059 gen_continuation_enter(masm, 2060 in_regs, 2061 exception_offset, 2062 oop_maps, 2063 frame_complete, 2064 stack_slots, 2065 interpreted_entry_offset, 2066 vep_offset); 2067 } else if (method->is_continuation_yield_intrinsic()) { 2068 gen_continuation_yield(masm, 2069 in_regs, 2070 oop_maps, 2071 frame_complete, 2072 stack_slots, 2073 vep_offset); 2074 } else { 2075 guarantee(false, "Unknown Continuation native intrinsic"); 2076 } 2077 2078 #ifdef ASSERT 2079 if (method->is_continuation_enter_intrinsic()) { 2080 assert(interpreted_entry_offset != -1, "Must be set"); 2081 assert(exception_offset != -1, "Must be set"); 2082 } else { 2083 assert(interpreted_entry_offset == -1, "Must be unset"); 2084 assert(exception_offset == -1, "Must be unset"); 2085 } 2086 assert(frame_complete != -1, "Must be set"); 2087 assert(stack_slots != -1, "Must be set"); 2088 assert(vep_offset != -1, "Must be set"); 2089 #endif 2090 2091 __ flush(); 2092 nmethod* nm = nmethod::new_native_nmethod(method, 2093 compile_id, 2094 masm->code(), 2095 vep_offset, 2096 frame_complete, 2097 stack_slots, 2098 in_ByteSize(-1), 2099 in_ByteSize(-1), 2100 oop_maps, 2101 exception_offset); 2102 if (nm == nullptr) return nm; 2103 if (method->is_continuation_enter_intrinsic()) { 2104 ContinuationEntry::set_enter_code(nm, interpreted_entry_offset); 2105 } else if (method->is_continuation_yield_intrinsic()) { 2106 _cont_doYield_stub = nm; 2107 } 2108 return nm; 2109 } 2110 2111 if (method->is_method_handle_intrinsic()) { 2112 vmIntrinsics::ID iid = method->intrinsic_id(); 2113 intptr_t start = (intptr_t)__ pc(); 2114 int vep_offset = ((intptr_t)__ pc()) - start; 2115 gen_special_dispatch(masm, 2116 method, 2117 in_sig_bt, 2118 in_regs); 2119 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period 2120 __ flush(); 2121 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually 2122 return nmethod::new_native_nmethod(method, 2123 compile_id, 2124 masm->code(), 2125 vep_offset, 2126 frame_complete, 2127 stack_slots / VMRegImpl::slots_per_word, 2128 in_ByteSize(-1), 2129 in_ByteSize(-1), 2130 (OopMapSet*)nullptr); 2131 } 2132 2133 address native_func = method->native_function(); 2134 assert(native_func != nullptr, "must have function"); 2135 2136 // First, create signature for outgoing C call 2137 // -------------------------------------------------------------------------- 2138 2139 int total_in_args = method->size_of_parameters(); 2140 // We have received a description of where all the java args are located 2141 // on entry to the wrapper. We need to convert these args to where 2142 // the jni function will expect them. To figure out where they go 2143 // we convert the java signature to a C signature by inserting 2144 // the hidden arguments as arg[0] and possibly arg[1] (static method) 2145 2146 // Calculate the total number of C arguments and create arrays for the 2147 // signature and the outgoing registers. 
2148 // On ppc64, we have two arrays for the outgoing registers, because 2149 // some floating-point arguments must be passed in registers _and_ 2150 // in stack locations. 2151 bool method_is_static = method->is_static(); 2152 int total_c_args = total_in_args + (method_is_static ? 2 : 1); 2153 2154 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 2155 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 2156 2157 // Create the signature for the C call: 2158 // 1) add the JNIEnv* 2159 // 2) add the class if the method is static 2160 // 3) copy the rest of the incoming signature (shifted by the number of 2161 // hidden arguments). 2162 2163 int argc = 0; 2164 out_sig_bt[argc++] = T_ADDRESS; 2165 if (method->is_static()) { 2166 out_sig_bt[argc++] = T_OBJECT; 2167 } 2168 2169 for (int i = 0; i < total_in_args ; i++ ) { 2170 out_sig_bt[argc++] = in_sig_bt[i]; 2171 } 2172 2173 2174 // Compute the wrapper's frame size. 2175 // -------------------------------------------------------------------------- 2176 2177 // Now figure out where the args must be stored and how much stack space 2178 // they require. 2179 // 2180 // Compute framesize for the wrapper. We need to handlize all oops in 2181 // incoming registers. 2182 // 2183 // Calculate the total number of stack slots we will need: 2184 // 1) abi requirements 2185 // 2) outgoing arguments 2186 // 3) space for inbound oop handle area 2187 // 4) space for handlizing a klass if static method 2188 // 5) space for a lock if synchronized method 2189 // 6) workspace for saving return values, int <-> float reg moves, etc. 2190 // 7) alignment 2191 // 2192 // Layout of the native wrapper frame: 2193 // (stack grows upwards, memory grows downwards) 2194 // 2195 // NW [ABI_REG_ARGS] <-- 1) R1_SP 2196 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset 2197 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset 2198 // klass <-- 4) R1_SP + klass_offset 2199 // lock <-- 5) R1_SP + lock_offset 2200 // [workspace] <-- 6) R1_SP + workspace_offset 2201 // [alignment] (optional) <-- 7) 2202 // caller [JIT_TOP_ABI_48] <-- r_callers_sp 2203 // 2204 // - *_slot_offset Indicates offset from SP in number of stack slots. 2205 // - *_offset Indicates offset from SP in bytes. 2206 2207 int stack_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args) + // 1+2) 2208 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention. 2209 2210 // Now the space for the inbound oop handle area. 2211 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word; 2212 2213 int oop_handle_slot_offset = stack_slots; 2214 stack_slots += total_save_slots; // 3) 2215 2216 int klass_slot_offset = 0; 2217 int klass_offset = -1; 2218 if (method_is_static) { // 4) 2219 klass_slot_offset = stack_slots; 2220 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 2221 stack_slots += VMRegImpl::slots_per_word; 2222 } 2223 2224 int lock_slot_offset = 0; 2225 int lock_offset = -1; 2226 if (method->is_synchronized()) { // 5) 2227 lock_slot_offset = stack_slots; 2228 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size; 2229 stack_slots += VMRegImpl::slots_per_word; 2230 } 2231 2232 int workspace_slot_offset = stack_slots; // 6) 2233 stack_slots += 2; 2234 2235 // Now compute actual number of stack words we need. 2236 // Rounding to make stack properly aligned. 
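// (With VMRegImpl::stack_slot_size == 4 and frame::alignment_in_bytes == 16, stack_slots is
// rounded up to a multiple of 4 slots, so the resulting frame size is a multiple of 16 bytes.)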
2237 stack_slots = align_up(stack_slots, // 7) 2238 frame::alignment_in_bytes / VMRegImpl::stack_slot_size); 2239 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size; 2240 2241 2242 // Now we can start generating code. 2243 // -------------------------------------------------------------------------- 2244 2245 intptr_t start_pc = (intptr_t)__ pc(); 2246 intptr_t vep_start_pc; 2247 intptr_t frame_done_pc; 2248 2249 Label handle_pending_exception; 2250 Label last_java_pc; 2251 2252 Register r_callers_sp = R21; 2253 Register r_temp_1 = R22; 2254 Register r_temp_2 = R23; 2255 Register r_temp_3 = R24; 2256 Register r_temp_4 = R25; 2257 Register r_temp_5 = R26; 2258 Register r_temp_6 = R27; 2259 Register r_last_java_pc = R28; 2260 2261 Register r_carg1_jnienv = noreg; 2262 Register r_carg2_classorobject = noreg; 2263 r_carg1_jnienv = out_regs[0].first()->as_Register(); 2264 r_carg2_classorobject = out_regs[1].first()->as_Register(); 2265 2266 2267 // Generate the Unverified Entry Point (UEP). 2268 // -------------------------------------------------------------------------- 2269 assert(start_pc == (intptr_t)__ pc(), "uep must be at start"); 2270 2271 // Check ic: object class == cached class? 2272 if (!method_is_static) { 2273 __ ic_check(4 /* end_alignment */); 2274 } 2275 2276 // Generate the Verified Entry Point (VEP). 2277 // -------------------------------------------------------------------------- 2278 vep_start_pc = (intptr_t)__ pc(); 2279 2280 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) { 2281 Label L_skip_barrier; 2282 Register klass = r_temp_1; 2283 // Notify OOP recorder (don't need the relocation) 2284 AddressLiteral md = __ constant_metadata_address(method->method_holder()); 2285 __ load_const_optimized(klass, md.value(), R0); 2286 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/); 2287 2288 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0); 2289 __ mtctr(klass); 2290 __ bctr(); 2291 2292 __ bind(L_skip_barrier); 2293 } 2294 2295 __ save_LR(r_temp_1); 2296 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame. 2297 __ mr(r_callers_sp, R1_SP); // Remember frame pointer. 2298 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame. 2299 2300 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2301 bs->nmethod_entry_barrier(masm, r_temp_1); 2302 2303 frame_done_pc = (intptr_t)__ pc(); 2304 2305 // Native nmethod wrappers never take possession of the oop arguments. 2306 // So the caller will gc the arguments. 2307 // The only thing we need an oopMap for is if the call is static. 2308 // 2309 // An OopMap for lock (and class if static), and one for the VM call itself. 2310 OopMapSet *oop_maps = new OopMapSet(); 2311 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2312 2313 // Move arguments from register/stack to register/stack. 2314 // -------------------------------------------------------------------------- 2315 // 2316 // We immediately shuffle the arguments so that for any vm call we have 2317 // to make from here on out (sync slow path, jvmti, etc.) we will have 2318 // captured the oops from our caller and have a valid oopMap for them. 2319 // 2320 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 2321 // (derived from JavaThread* which is in R16_thread) and, if static, 2322 // the class mirror instead of a receiver. 
This pretty much guarantees that 2323 // register layout will not match. We ignore these extra arguments during 2324 // the shuffle. The shuffle is described by the two calling convention 2325 // vectors we have in our possession. We simply walk the java vector to 2326 // get the source locations and the c vector to get the destinations. 2327 2328 // Record sp-based slot for receiver on stack for non-static methods. 2329 int receiver_offset = -1; 2330 2331 // We move the arguments backward because the floating point registers 2332 // destination will always be to a register with a greater or equal 2333 // register number or the stack. 2334 // in is the index of the incoming Java arguments 2335 // out is the index of the outgoing C arguments 2336 2337 #ifdef ASSERT 2338 bool reg_destroyed[Register::number_of_registers]; 2339 bool freg_destroyed[FloatRegister::number_of_registers]; 2340 for (int r = 0 ; r < Register::number_of_registers ; r++) { 2341 reg_destroyed[r] = false; 2342 } 2343 for (int f = 0 ; f < FloatRegister::number_of_registers ; f++) { 2344 freg_destroyed[f] = false; 2345 } 2346 #endif // ASSERT 2347 2348 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) { 2349 2350 #ifdef ASSERT 2351 if (in_regs[in].first()->is_Register()) { 2352 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!"); 2353 } else if (in_regs[in].first()->is_FloatRegister()) { 2354 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!"); 2355 } 2356 if (out_regs[out].first()->is_Register()) { 2357 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true; 2358 } else if (out_regs[out].first()->is_FloatRegister()) { 2359 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true; 2360 } 2361 #endif // ASSERT 2362 2363 switch (in_sig_bt[in]) { 2364 case T_BOOLEAN: 2365 case T_CHAR: 2366 case T_BYTE: 2367 case T_SHORT: 2368 case T_INT: 2369 // Move int and do sign extension. 2370 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2371 break; 2372 case T_LONG: 2373 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2374 break; 2375 case T_ARRAY: 2376 case T_OBJECT: 2377 object_move(masm, stack_slots, 2378 oop_map, oop_handle_slot_offset, 2379 ((in == 0) && (!method_is_static)), &receiver_offset, 2380 in_regs[in], out_regs[out], 2381 r_callers_sp, r_temp_1, r_temp_2); 2382 break; 2383 case T_VOID: 2384 break; 2385 case T_FLOAT: 2386 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2387 break; 2388 case T_DOUBLE: 2389 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2390 break; 2391 case T_ADDRESS: 2392 fatal("found type (T_ADDRESS) in java args"); 2393 break; 2394 default: 2395 ShouldNotReachHere(); 2396 break; 2397 } 2398 } 2399 2400 // Pre-load a static method's oop into ARG2. 2401 // Used both by locking code and the normal JNI call code. 2402 if (method_is_static) { 2403 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), 2404 r_carg2_classorobject); 2405 2406 // Now handlize the static class mirror in carg2. It's known not-null. 2407 __ std(r_carg2_classorobject, klass_offset, R1_SP); 2408 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2409 __ addi(r_carg2_classorobject, R1_SP, klass_offset); 2410 } 2411 2412 // Get JNIEnv* which is first argument to native. 
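// (The JNIEnv is embedded in the JavaThread object, so its address is simply
// R16_thread + jni_environment_offset; no load is required.)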
2413   __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
2414
2415   // NOTE:
2416   //
2417   // We have all of the arguments set up at this point.
2418   // We MUST NOT touch any outgoing regs from this point on.
2419   // So if we must call out we must push a new frame.
2420
2421   // The last java pc will also be used as resume pc if this is the wrapper for wait0.
2422   // For this purpose the precise location matters but not for oopmap lookup.
2423   __ calculate_address_from_global_toc(r_last_java_pc, last_java_pc, true, true, true, true);
2424
2425   // Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
2426   assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");
2427
2428 # if 0
2429   // DTrace method entry
2430 # endif
2431
2432   // Lock a synchronized method.
2433   // --------------------------------------------------------------------------
2434
2435   if (method->is_synchronized()) {
2436     Register r_oop = r_temp_4;
2437     const Register r_box = r_temp_5;
2438     Label done, locked;
2439
2440     // Load the oop for the object or class. r_carg2_classorobject contains
2441     // either the handlized oop from the incoming arguments or the handlized
2442     // class mirror (if the method is static).
2443     __ ld(r_oop, 0, r_carg2_classorobject);
2444
2445     // Get the lock box slot's address.
2446     __ addi(r_box, R1_SP, lock_offset);
2447
2448     // Try fastpath for locking.
2449     if (LockingMode == LM_LIGHTWEIGHT) {
2450       // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
2451       Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
2452       __ compiler_fast_lock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
2453     } else {
2454       // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
2455       __ compiler_fast_lock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2456     }
2457     __ beq(CR0, locked);
2458
2459     // None of the above fast optimizations worked so we have to get into the
2460     // slow case of monitor enter. Inline a special case of call_VM that
2461     // disallows any pending_exception.
2462
2463     // Save argument registers and leave room for C-compatible ABI_REG_ARGS.
2464     int frame_size = frame::native_abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes);
2465     __ mr(R11_scratch1, R1_SP);
2466     RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs);
2467
2468     // Do the call.
2469     __ set_last_Java_frame(R11_scratch1, r_last_java_pc);
2470     assert(r_last_java_pc->is_nonvolatile(), "r_last_java_pc needs to be preserved across complete_monitor_locking_C call");
2471     // The following call will not be preempted.
2472     // push_cont_fastpath forces freeze slow path in case we try to preempt where we will pin the
2473     // vthread to the carrier (see FreezeBase::recurse_freeze_native_frame()).
2474 __ push_cont_fastpath(); 2475 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread); 2476 __ pop_cont_fastpath(); 2477 __ reset_last_Java_frame(); 2478 2479 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs); 2480 2481 __ asm_assert_mem8_is_zero(thread_(pending_exception), 2482 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C"); 2483 2484 __ bind(locked); 2485 } 2486 2487 __ set_last_Java_frame(R1_SP, r_last_java_pc); 2488 2489 // Publish thread state 2490 // -------------------------------------------------------------------------- 2491 2492 // Transition from _thread_in_Java to _thread_in_native. 2493 __ li(R0, _thread_in_native); 2494 __ release(); 2495 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2496 __ stw(R0, thread_(thread_state)); 2497 2498 2499 // The JNI call 2500 // -------------------------------------------------------------------------- 2501 __ call_c(native_func, relocInfo::runtime_call_type); 2502 2503 2504 // Now, we are back from the native code. 2505 2506 2507 // Unpack the native result. 2508 // -------------------------------------------------------------------------- 2509 2510 // For int-types, we do any needed sign-extension required. 2511 // Care must be taken that the return values (R3_RET and F1_RET) 2512 // will survive any VM calls for blocking or unlocking. 2513 // An OOP result (handle) is done specially in the slow-path code. 2514 2515 switch (ret_type) { 2516 case T_VOID: break; // Nothing to do! 2517 case T_FLOAT: break; // Got it where we want it (unless slow-path). 2518 case T_DOUBLE: break; // Got it where we want it (unless slow-path). 2519 case T_LONG: break; // Got it where we want it (unless slow-path). 2520 case T_OBJECT: break; // Really a handle. 2521 // Cannot de-handlize until after reclaiming jvm_lock. 2522 case T_ARRAY: break; 2523 2524 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1) 2525 __ normalize_bool(R3_RET); 2526 break; 2527 } 2528 case T_BYTE: { // sign extension 2529 __ extsb(R3_RET, R3_RET); 2530 break; 2531 } 2532 case T_CHAR: { // unsigned result 2533 __ andi(R3_RET, R3_RET, 0xffff); 2534 break; 2535 } 2536 case T_SHORT: { // sign extension 2537 __ extsh(R3_RET, R3_RET); 2538 break; 2539 } 2540 case T_INT: // nothing to do 2541 break; 2542 default: 2543 ShouldNotReachHere(); 2544 break; 2545 } 2546 2547 // Publish thread state 2548 // -------------------------------------------------------------------------- 2549 2550 // Switch thread to "native transition" state before reading the 2551 // synchronization state. This additional state is necessary because reading 2552 // and testing the synchronization state is not atomic w.r.t. GC, as this 2553 // scenario demonstrates: 2554 // - Java thread A, in _thread_in_native state, loads _not_synchronized 2555 // and is preempted. 2556 // - VM thread changes sync state to synchronizing and suspends threads 2557 // for GC. 2558 // - Thread A is resumed to finish this native method, but doesn't block 2559 // here since it didn't see any synchronization in progress, and escapes. 2560 2561 // Transition from _thread_in_native to _thread_in_native_trans. 2562 __ li(R0, _thread_in_native_trans); 2563 __ release(); 2564 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2565 __ stw(R0, thread_(thread_state)); 2566 2567 2568 // Must we block? 
2569 // -------------------------------------------------------------------------- 2570 2571 // Block, if necessary, before resuming in _thread_in_Java state. 2572 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2573 { 2574 Label no_block, sync; 2575 2576 // Force this write out before the read below. 2577 if (!UseSystemMemoryBarrier) { 2578 __ fence(); 2579 } 2580 2581 Register sync_state_addr = r_temp_4; 2582 Register sync_state = r_temp_5; 2583 Register suspend_flags = r_temp_6; 2584 2585 // No synchronization in progress nor yet synchronized 2586 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path). 2587 __ safepoint_poll(sync, sync_state, true /* at_return */, false /* in_nmethod */); 2588 2589 // Not suspended. 2590 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size"); 2591 __ lwz(suspend_flags, thread_(suspend_flags)); 2592 __ cmpwi(CR1, suspend_flags, 0); 2593 __ beq(CR1, no_block); 2594 2595 // Block. Save any potential method result value before the operation and 2596 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this 2597 // lets us share the oopMap we used when we went native rather than create 2598 // a distinct one for this pc. 2599 __ bind(sync); 2600 __ isync(); 2601 2602 address entry_point = 2603 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans); 2604 save_native_result(masm, ret_type, workspace_slot_offset); 2605 __ call_VM_leaf(entry_point, R16_thread); 2606 restore_native_result(masm, ret_type, workspace_slot_offset); 2607 2608 __ bind(no_block); 2609 2610 // Publish thread state. 2611 // -------------------------------------------------------------------------- 2612 2613 // Thread state is thread_in_native_trans. Any safepoint blocking has 2614 // already happened so we can now change state to _thread_in_Java. 2615 2616 // Transition from _thread_in_native_trans to _thread_in_Java. 2617 __ li(R0, _thread_in_Java); 2618 __ lwsync(); // Acquire safepoint and suspend state, release thread state. 2619 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2620 __ stw(R0, thread_(thread_state)); 2621 2622 // Check preemption for Object.wait() 2623 if (LockingMode != LM_LEGACY && method->is_object_wait0()) { 2624 Label not_preempted; 2625 __ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); 2626 __ cmpdi(CR0, R0, 0); 2627 __ beq(CR0, not_preempted); 2628 __ mtlr(R0); 2629 __ li(R0, 0); 2630 __ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); 2631 __ blr(); 2632 __ bind(not_preempted); 2633 } 2634 __ bind(last_java_pc); 2635 // We use the same pc/oopMap repeatedly when we call out above. 2636 intptr_t oopmap_pc = (intptr_t) __ pc(); 2637 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); 2638 } 2639 2640 // Reguard any pages if necessary. 
2641   // --------------------------------------------------------------------------
2642
2643   Label no_reguard;
2644   __ lwz(r_temp_1, thread_(stack_guard_state));
2645   __ cmpwi(CR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled);
2646   __ bne(CR0, no_reguard);
2647
2648   save_native_result(masm, ret_type, workspace_slot_offset);
2649   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2650   restore_native_result(masm, ret_type, workspace_slot_offset);
2651
2652   __ bind(no_reguard);
2653
2654
2655   // Unlock
2656   // --------------------------------------------------------------------------
2657
2658   if (method->is_synchronized()) {
2659     const Register r_oop = r_temp_4;
2660     const Register r_box = r_temp_5;
2661     const Register r_exception = r_temp_6;
2662     Label done;
2663
2664     // Get oop and address of lock object box.
2665     if (method_is_static) {
2666       assert(klass_offset != -1, "");
2667       __ ld(r_oop, klass_offset, R1_SP);
2668     } else {
2669       assert(receiver_offset != -1, "");
2670       __ ld(r_oop, receiver_offset, R1_SP);
2671     }
2672     __ addi(r_box, R1_SP, lock_offset);
2673
2674     // Try fastpath for unlocking.
2675     if (LockingMode == LM_LIGHTWEIGHT) {
2676       __ compiler_fast_unlock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2677     } else {
2678       __ compiler_fast_unlock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
2679     }
2680     __ beq(CR0, done);
2681
2682     // Save and restore any potential method result value around the unlocking operation.
2683     save_native_result(masm, ret_type, workspace_slot_offset);
2684
2685     // Must save pending exception around the slow-path VM call. Since it's a
2686     // leaf call, the pending exception (if any) can be kept in a register.
2687     __ ld(r_exception, thread_(pending_exception));
2688     assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
2689     __ li(R0, 0);
2690     __ std(R0, thread_(pending_exception));
2691
2692     // Slow case of monitor exit.
2693     // Inline a special case of call_VM that disallows any pending_exception.
2694     // Arguments are (oop obj, BasicLock* lock, JavaThread* thread).
2695     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);
2696
2697     __ asm_assert_mem8_is_zero(thread_(pending_exception),
2698                                "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C");
2699
2700     restore_native_result(masm, ret_type, workspace_slot_offset);
2701
2702     // Check_forward_pending_exception jumps to forward_exception if any pending
2703     // exception is set. The forward_exception routine expects to see the
2704     // exception in pending_exception and not in a register. Kind of clumsy,
2705     // since all folks who branch to forward_exception must have tested
2706     // pending_exception first and hence have it in a register already.
2707     __ std(r_exception, thread_(pending_exception));
2708
2709     __ bind(done);
2710   }
2711
2712 # if 0
2713   // DTrace method exit
2714 # endif
2715
2716   // Clear "last Java frame" SP and PC.
2717   // --------------------------------------------------------------------------
2718
2719   // Last java frame won't be set if we're resuming after preemption
2720   bool maybe_preempted = LockingMode != LM_LEGACY && method->is_object_wait0();
2721   __ reset_last_Java_frame(!maybe_preempted /* check_last_java_sp */);
2722
2723   // Unbox oop result, e.g. JNIHandles::resolve value.
2724 // -------------------------------------------------------------------------- 2725 2726 if (is_reference_type(ret_type)) { 2727 __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, MacroAssembler::PRESERVATION_NONE); 2728 } 2729 2730 if (CheckJNICalls) { 2731 // clear_pending_jni_exception_check 2732 __ load_const_optimized(R0, 0L); 2733 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread); 2734 } 2735 2736 // Reset handle block. 2737 // -------------------------------------------------------------------------- 2738 __ ld(r_temp_1, thread_(active_handles)); 2739 // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size"); 2740 __ li(r_temp_2, 0); 2741 __ stw(r_temp_2, in_bytes(JNIHandleBlock::top_offset()), r_temp_1); 2742 2743 // Prepare for return 2744 // -------------------------------------------------------------------------- 2745 __ pop_frame(); 2746 __ restore_LR(R11); 2747 2748 #if INCLUDE_JFR 2749 // We need to do a poll test after unwind in case the sampler 2750 // managed to sample the native frame after returning to Java. 2751 Label L_stub; 2752 int safepoint_offset = __ offset(); 2753 if (!UseSIGTRAP) { 2754 __ relocate(relocInfo::poll_return_type); 2755 } 2756 __ safepoint_poll(L_stub, r_temp_2, true /* at_return */, true /* in_nmethod: frame already popped */); 2757 #endif // INCLUDE_JFR 2758 2759 // Check for pending exceptions. 2760 // -------------------------------------------------------------------------- 2761 __ ld(r_temp_2, thread_(pending_exception)); 2762 __ cmpdi(CR0, r_temp_2, 0); 2763 __ bne(CR0, handle_pending_exception); 2764 2765 // Return. 2766 __ blr(); 2767 2768 // Handler for return safepoint (out-of-line). 2769 #if INCLUDE_JFR 2770 if (!UseSIGTRAP) { 2771 __ bind(L_stub); 2772 __ jump_to_polling_page_return_handler_blob(safepoint_offset); 2773 } 2774 #endif // INCLUDE_JFR 2775 2776 // Handler for pending exceptions (out-of-line). 2777 // -------------------------------------------------------------------------- 2778 // Since this is a native call, we know the proper exception handler 2779 // is the empty function. We just pop this frame and then jump to 2780 // forward_exception_entry. 2781 __ bind(handle_pending_exception); 2782 __ b64_patchable((address)StubRoutines::forward_exception_entry(), 2783 relocInfo::runtime_call_type); 2784 2785 // Done. 2786 // -------------------------------------------------------------------------- 2787 2788 __ flush(); 2789 2790 nmethod *nm = nmethod::new_native_nmethod(method, 2791 compile_id, 2792 masm->code(), 2793 vep_start_pc-start_pc, 2794 frame_done_pc-start_pc, 2795 stack_slots / VMRegImpl::slots_per_word, 2796 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2797 in_ByteSize(lock_offset), 2798 oop_maps); 2799 2800 return nm; 2801 } 2802 2803 // This function returns the adjust size (in number of words) to a c2i adapter 2804 // activation for use during deoptimization. 
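// For example, assuming Interpreter::stackElementWords == 1 and frame::frame_alignment_in_words == 2
// (the usual ppc64 values), a callee with 2 parameters and 5 locals needs (5 - 2) * 1 = 3 extra
// words, which align_up rounds to 4.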
2805 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2806 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::frame_alignment_in_words); 2807 } 2808 2809 uint SharedRuntime::in_preserve_stack_slots() { 2810 return frame::jit_in_preserve_size / VMRegImpl::stack_slot_size; 2811 } 2812 2813 uint SharedRuntime::out_preserve_stack_slots() { 2814 #if defined(COMPILER1) || defined(COMPILER2) 2815 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size; 2816 #else 2817 return 0; 2818 #endif 2819 } 2820 2821 VMReg SharedRuntime::thread_register() { 2822 // On PPC virtual threads don't save the JavaThread* in their context (e.g. C1 stub frames). 2823 ShouldNotCallThis(); 2824 return nullptr; 2825 } 2826 2827 #if defined(COMPILER1) || defined(COMPILER2) 2828 // Frame generation for deopt and uncommon trap blobs. 2829 static void push_skeleton_frame(MacroAssembler* masm, bool deopt, 2830 /* Read */ 2831 Register unroll_block_reg, 2832 /* Update */ 2833 Register frame_sizes_reg, 2834 Register number_of_frames_reg, 2835 Register pcs_reg, 2836 /* Invalidate */ 2837 Register frame_size_reg, 2838 Register pc_reg) { 2839 2840 __ ld(pc_reg, 0, pcs_reg); 2841 __ ld(frame_size_reg, 0, frame_sizes_reg); 2842 __ std(pc_reg, _abi0(lr), R1_SP); 2843 __ push_frame(frame_size_reg, R0/*tmp*/); 2844 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP); 2845 __ addi(number_of_frames_reg, number_of_frames_reg, -1); 2846 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize); 2847 __ addi(pcs_reg, pcs_reg, wordSize); 2848 } 2849 2850 // Loop through the UnrollBlock info and create new frames. 2851 static void push_skeleton_frames(MacroAssembler* masm, bool deopt, 2852 /* read */ 2853 Register unroll_block_reg, 2854 /* invalidate */ 2855 Register frame_sizes_reg, 2856 Register number_of_frames_reg, 2857 Register pcs_reg, 2858 Register frame_size_reg, 2859 Register pc_reg) { 2860 Label loop; 2861 2862 // _number_of_frames is of type int (deoptimization.hpp) 2863 __ lwa(number_of_frames_reg, 2864 in_bytes(Deoptimization::UnrollBlock::number_of_frames_offset()), 2865 unroll_block_reg); 2866 __ ld(pcs_reg, 2867 in_bytes(Deoptimization::UnrollBlock::frame_pcs_offset()), 2868 unroll_block_reg); 2869 __ ld(frame_sizes_reg, 2870 in_bytes(Deoptimization::UnrollBlock::frame_sizes_offset()), 2871 unroll_block_reg); 2872 2873 // stack: (caller_of_deoptee, ...). 2874 2875 // At this point we either have an interpreter frame or a compiled 2876 // frame on top of stack. If it is a compiled frame we push a new c2i 2877 // adapter here 2878 2879 // Memorize top-frame stack-pointer. 2880 __ mr(frame_size_reg/*old_sp*/, R1_SP); 2881 2882 // Resize interpreter top frame OR C2I adapter. 2883 2884 // At this moment, the top frame (which is the caller of the deoptee) is 2885 // an interpreter frame or a newly pushed C2I adapter or an entry frame. 2886 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the 2887 // outgoing arguments. 2888 // 2889 // In order to push the interpreter frame for the deoptee, we need to 2890 // resize the top frame such that we are able to place the deoptee's 2891 // locals in the frame. 2892 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI 2893 // into a valid PARENT_IJAVA_FRAME_ABI. 
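// (caller_adjustment, loaded below, is the amount in bytes by which the top frame has to grow
// for this; it is negated because resize_frame() takes a delta that is added to R1_SP, where a
// negative delta extends the frame.)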
2894 2895 __ lwa(R11_scratch1, 2896 in_bytes(Deoptimization::UnrollBlock::caller_adjustment_offset()), 2897 unroll_block_reg); 2898 __ neg(R11_scratch1, R11_scratch1); 2899 2900 // R11_scratch1 contains size of locals for frame resizing. 2901 // R12_scratch2 contains top frame's lr. 2902 2903 // Resize frame by complete frame size prevents TOC from being 2904 // overwritten by locals. A more stack space saving way would be 2905 // to copy the TOC to its location in the new abi. 2906 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size); 2907 2908 // now, resize the frame 2909 __ resize_frame(R11_scratch1, pc_reg/*tmp*/); 2910 2911 // In the case where we have resized a c2i frame above, the optional 2912 // alignment below the locals has size 32 (why?). 2913 __ std(R12_scratch2, _abi0(lr), R1_SP); 2914 2915 // Initialize initial_caller_sp. 2916 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP); 2917 2918 #ifdef ASSERT 2919 // Make sure that there is at least one entry in the array. 2920 __ cmpdi(CR0, number_of_frames_reg, 0); 2921 __ asm_assert_ne("array_size must be > 0"); 2922 #endif 2923 2924 // Now push the new interpreter frames. 2925 // 2926 __ bind(loop); 2927 // Allocate a new frame, fill in the pc. 2928 push_skeleton_frame(masm, deopt, 2929 unroll_block_reg, 2930 frame_sizes_reg, 2931 number_of_frames_reg, 2932 pcs_reg, 2933 frame_size_reg, 2934 pc_reg); 2935 __ cmpdi(CR0, number_of_frames_reg, 0); 2936 __ bne(CR0, loop); 2937 2938 // Get the return address pointing into the template interpreter. 2939 __ ld(R0, 0, pcs_reg); 2940 // Store it in the top interpreter frame. 2941 __ std(R0, _abi0(lr), R1_SP); 2942 // Initialize frame_manager_lr of interpreter top frame. 2943 } 2944 #endif 2945 2946 void SharedRuntime::generate_deopt_blob() { 2947 // Allocate space for the code 2948 ResourceMark rm; 2949 // Setup code generation tools 2950 const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id); 2951 CodeBuffer buffer(name, 2048, 1024); 2952 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 2953 Label exec_mode_initialized; 2954 int frame_size_in_words; 2955 OopMap* map = nullptr; 2956 OopMapSet *oop_maps = new OopMapSet(); 2957 2958 // size of ABI112 plus spill slots for R3_RET and F1_RET. 2959 const int frame_size_in_bytes = frame::native_abi_reg_args_spill_size; 2960 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 2961 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info. 2962 2963 const Register exec_mode_reg = R21_tmp1; 2964 2965 const address start = __ pc(); 2966 2967 #if defined(COMPILER1) || defined(COMPILER2) 2968 // -------------------------------------------------------------------------- 2969 // Prolog for non exception case! 2970 2971 // We have been called from the deopt handler of the deoptee. 2972 // 2973 // deoptee: 2974 // ... 2975 // call X 2976 // ... 2977 // deopt_handler: call_deopt_stub 2978 // cur. return pc --> ... 2979 // 2980 // So currently SR_LR points behind the call in the deopt handler. 2981 // We adjust it such that it points to the start of the deopt handler. 2982 // The return_pc has been stored in the frame of the deoptee and 2983 // will replace the address of the deopt_handler in the call 2984 // to Deoptimization::fetch_unroll_info below. 2985 // We can't grab a free register here, because all registers may 2986 // contain live values, so let the RegisterSaver do the adjustment 2987 // of the return pc. 
2988 const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size; 2989 2990 // Push the "unpack frame" 2991 // Save everything in sight. 2992 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2993 &first_frame_size_in_bytes, 2994 /*generate_oop_map=*/ true, 2995 return_pc_adjustment_no_exception, 2996 RegisterSaver::return_pc_is_lr); 2997 assert(map != nullptr, "OopMap must have been created"); 2998 2999 __ li(exec_mode_reg, Deoptimization::Unpack_deopt); 3000 // Save exec mode for unpack_frames. 3001 __ b(exec_mode_initialized); 3002 3003 // -------------------------------------------------------------------------- 3004 // Prolog for exception case 3005 3006 // An exception is pending. 3007 // We have been called with a return (interpreter) or a jump (exception blob). 3008 // 3009 // - R3_ARG1: exception oop 3010 // - R4_ARG2: exception pc 3011 3012 int exception_offset = __ pc() - start; 3013 3014 BLOCK_COMMENT("Prolog for exception case"); 3015 3016 // Store exception oop and pc in thread (location known to GC). 3017 // This is needed since the call to "fetch_unroll_info()" may safepoint. 3018 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 3019 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 3020 __ std(R4_ARG2, _abi0(lr), R1_SP); 3021 3022 // Vanilla deoptimization with an exception pending in exception_oop. 3023 int exception_in_tls_offset = __ pc() - start; 3024 3025 // Push the "unpack frame". 3026 // Save everything in sight. 3027 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3028 &first_frame_size_in_bytes, 3029 /*generate_oop_map=*/ false, 3030 /*return_pc_adjustment_exception=*/ 0, 3031 RegisterSaver::return_pc_is_pre_saved); 3032 3033 // Deopt during an exception. Save exec mode for unpack_frames. 3034 __ li(exec_mode_reg, Deoptimization::Unpack_exception); 3035 3036 // fall through 3037 3038 int reexecute_offset = 0; 3039 #ifdef COMPILER1 3040 __ b(exec_mode_initialized); 3041 3042 // Reexecute entry, similar to c2 uncommon trap 3043 reexecute_offset = __ pc() - start; 3044 3045 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3046 &first_frame_size_in_bytes, 3047 /*generate_oop_map=*/ false, 3048 /*return_pc_adjustment_reexecute=*/ 0, 3049 RegisterSaver::return_pc_is_pre_saved); 3050 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute); 3051 #endif 3052 3053 // -------------------------------------------------------------------------- 3054 __ BIND(exec_mode_initialized); 3055 3056 const Register unroll_block_reg = R22_tmp2; 3057 3058 // We need to set `last_Java_frame' because `fetch_unroll_info' will 3059 // call `last_Java_frame()'. The value of the pc in the frame is not 3060 // particularly important. It just needs to identify this blob. 3061 __ set_last_Java_frame(R1_SP, noreg); 3062 3063 // With EscapeAnalysis turned on, this call may safepoint! 3064 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg); 3065 address calls_return_pc = __ last_calls_return_pc(); 3066 // Set an oopmap for the call site that describes all our saved registers. 3067 oop_maps->add_gc_map(calls_return_pc - start, map); 3068 3069 __ reset_last_Java_frame(); 3070 // Save the return value. 3071 __ mr(unroll_block_reg, R3_RET); 3072 3073 // Restore only the result registers that have been saved 3074 // by save_volatile_registers(...). 
3075 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes); 3076 3077 // reload the exec mode from the UnrollBlock (it might have changed) 3078 __ lwz(exec_mode_reg, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg); 3079 // In excp_deopt_mode, restore and clear exception oop which we 3080 // stored in the thread during exception entry above. The exception 3081 // oop will be the return value of this stub. 3082 Label skip_restore_excp; 3083 __ cmpdi(CR0, exec_mode_reg, Deoptimization::Unpack_exception); 3084 __ bne(CR0, skip_restore_excp); 3085 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 3086 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 3087 __ li(R0, 0); 3088 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 3089 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 3090 __ BIND(skip_restore_excp); 3091 3092 __ pop_frame(); 3093 3094 // stack: (deoptee, optional i2c, caller of deoptee, ...). 3095 3096 // pop the deoptee's frame 3097 __ pop_frame(); 3098 3099 // stack: (caller_of_deoptee, ...). 3100 3101 // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled. 3102 // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info()) 3103 // and the frame is effectively not resized. 3104 Register caller_sp = R23_tmp3; 3105 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg); 3106 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5); 3107 3108 // Loop through the `UnrollBlock' info and create interpreter frames. 3109 push_skeleton_frames(masm, true/*deopt*/, 3110 unroll_block_reg, 3111 R23_tmp3, 3112 R24_tmp4, 3113 R25_tmp5, 3114 R26_tmp6, 3115 R27_tmp7); 3116 3117 // stack: (skeletal interpreter frame, ..., optional skeletal 3118 // interpreter frame, optional c2i, caller of deoptee, ...). 3119 3120 // push an `unpack_frame' taking care of float / int return values. 3121 __ push_frame(frame_size_in_bytes, R0/*tmp*/); 3122 3123 // stack: (unpack frame, skeletal interpreter frame, ..., optional 3124 // skeletal interpreter frame, optional c2i, caller of deoptee, 3125 // ...). 3126 3127 // Spill live volatile registers since we'll do a call. 3128 __ std( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP); 3129 __ stfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP); 3130 3131 // Let the unpacker layout information in the skeletal frames just 3132 // allocated. 3133 __ calculate_address_from_global_toc(R3_RET, calls_return_pc, true, true, true, true); 3134 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET); 3135 // This is a call to a LEAF method, so no oop map is required. 3136 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 3137 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/); 3138 __ reset_last_Java_frame(); 3139 3140 // Restore the volatiles saved above. 3141 __ ld( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP); 3142 __ lfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP); 3143 3144 // Pop the unpack frame. 3145 __ pop_frame(); 3146 __ restore_LR(R0); 3147 3148 // stack: (top interpreter frame, ..., optional interpreter frame, 3149 // optional c2i, caller of deoptee, ...). 3150 3151 // Initialize R14_state. 
3152 __ restore_interpreter_state(R11_scratch1); 3153 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 3154 3155 // Return to the interpreter entry point. 3156 __ blr(); 3157 __ flush(); 3158 #else // COMPILER2 3159 __ unimplemented("deopt blob needed only with compiler"); 3160 int exception_offset = __ pc() - start; 3161 #endif // COMPILER2 3162 3163 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 3164 reexecute_offset, first_frame_size_in_bytes / wordSize); 3165 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 3166 } 3167 3168 #ifdef COMPILER2 3169 UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() { 3170 // Allocate space for the code. 3171 ResourceMark rm; 3172 // Setup code generation tools. 3173 const char* name = OptoRuntime::stub_name(OptoStubId::uncommon_trap_id); 3174 CodeBuffer buffer(name, 2048, 1024); 3175 if (buffer.blob() == nullptr) { 3176 return nullptr; 3177 } 3178 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 3179 address start = __ pc(); 3180 3181 Register unroll_block_reg = R21_tmp1; 3182 Register klass_index_reg = R22_tmp2; 3183 Register unc_trap_reg = R23_tmp3; 3184 Register r_return_pc = R27_tmp7; 3185 3186 OopMapSet* oop_maps = new OopMapSet(); 3187 int frame_size_in_bytes = frame::native_abi_reg_args_size; 3188 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 3189 3190 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 3191 3192 // Push a dummy `unpack_frame' and call 3193 // `Deoptimization::uncommon_trap' to pack the compiled frame into a 3194 // vframe array and return the `UnrollBlock' information. 3195 3196 // Save LR to compiled frame. 3197 __ save_LR(R11_scratch1); 3198 3199 // Push an "uncommon_trap" frame. 3200 __ push_frame_reg_args(0, R11_scratch1); 3201 3202 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...). 3203 3204 // Set the `unpack_frame' as last_Java_frame. 3205 // `Deoptimization::uncommon_trap' expects it and considers its 3206 // sender frame as the deoptee frame. 3207 // Remember the offset of the instruction whose address will be 3208 // moved to R11_scratch1. 3209 address gc_map_pc = __ pc(); 3210 __ calculate_address_from_global_toc(r_return_pc, gc_map_pc, true, true, true, true); 3211 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc); 3212 3213 __ mr(klass_index_reg, R3); 3214 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap); 3215 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), 3216 R16_thread, klass_index_reg, R5_ARG3); 3217 3218 // Set an oopmap for the call site. 3219 oop_maps->add_gc_map(gc_map_pc - start, map); 3220 3221 __ reset_last_Java_frame(); 3222 3223 // Pop the `unpack frame'. 3224 __ pop_frame(); 3225 3226 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 3227 3228 // Save the return value. 3229 __ mr(unroll_block_reg, R3_RET); 3230 3231 // Pop the uncommon_trap frame. 3232 __ pop_frame(); 3233 3234 // stack: (caller_of_deoptee, ...). 3235 3236 #ifdef ASSERT 3237 __ lwz(R22_tmp2, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg); 3238 __ cmpdi(CR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap); 3239 __ asm_assert_eq("OptoRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap"); 3240 #endif 3241 3242 // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled. 
3243 // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info()) 3244 // and the frame is effectively not resized. 3245 Register caller_sp = R23_tmp3; 3246 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg); 3247 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5); 3248 3249 // Allocate new interpreter frame(s) and possibly a c2i adapter 3250 // frame. 3251 push_skeleton_frames(masm, false/*deopt*/, 3252 unroll_block_reg, 3253 R22_tmp2, 3254 R23_tmp3, 3255 R24_tmp4, 3256 R25_tmp5, 3257 R26_tmp6); 3258 3259 // stack: (skeletal interpreter frame, ..., optional skeletal 3260 // interpreter frame, optional c2i, caller of deoptee, ...). 3261 3262 // Push a dummy `unpack_frame' taking care of float return values. 3263 // Call `Deoptimization::unpack_frames' to layout information in the 3264 // interpreter frames just created. 3265 3266 // Push a simple "unpack frame" here. 3267 __ push_frame_reg_args(0, R11_scratch1); 3268 3269 // stack: (unpack frame, skeletal interpreter frame, ..., optional 3270 // skeletal interpreter frame, optional c2i, caller of deoptee, 3271 // ...). 3272 3273 // Set the "unpack_frame" as last_Java_frame. 3274 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc); 3275 3276 // Indicate it is the uncommon trap case. 3277 __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap); 3278 // Let the unpacker layout information in the skeletal frames just 3279 // allocated. 3280 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 3281 R16_thread, unc_trap_reg); 3282 3283 __ reset_last_Java_frame(); 3284 // Pop the `unpack frame'. 3285 __ pop_frame(); 3286 // Restore LR from top interpreter frame. 3287 __ restore_LR(R11_scratch1); 3288 3289 // stack: (top interpreter frame, ..., optional interpreter frame, 3290 // optional c2i, caller of deoptee, ...). 3291 3292 __ restore_interpreter_state(R11_scratch1); 3293 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 3294 3295 // Return to the interpreter entry point. 3296 __ blr(); 3297 3298 masm->flush(); 3299 3300 return UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize); 3301 } 3302 #endif // COMPILER2 3303 3304 // Generate a special Compile2Runtime blob that saves all registers, and setup oopmap. 3305 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) { 3306 assert(StubRoutines::forward_exception_entry() != nullptr, 3307 "must be generated before"); 3308 assert(is_polling_page_id(id), "expected a polling page stub id"); 3309 3310 ResourceMark rm; 3311 OopMapSet *oop_maps = new OopMapSet(); 3312 OopMap* map; 3313 3314 // Allocate space for the code. Setup code generation tools. 3315 const char* name = SharedRuntime::stub_name(id); 3316 CodeBuffer buffer(name, 2048, 1024); 3317 MacroAssembler* masm = new MacroAssembler(&buffer); 3318 3319 address start = __ pc(); 3320 int frame_size_in_bytes = 0; 3321 3322 RegisterSaver::ReturnPCLocation return_pc_location; 3323 bool cause_return = (id == SharedStubId::polling_page_return_handler_id); 3324 if (cause_return) { 3325 // Nothing to do here. The frame has already been popped in MachEpilogNode. 3326 // Register LR already contains the return pc. 3327 return_pc_location = RegisterSaver::return_pc_is_pre_saved; 3328 } else { 3329 // Use thread()->saved_exception_pc() as return pc. 
3330 return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc; 3331 } 3332 3333 bool save_vectors = (id == SharedStubId::polling_page_vectors_safepoint_handler_id); 3334 3335 // Save registers, fpu state, and flags. Set R31 = return pc. 3336 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3337 &frame_size_in_bytes, 3338 /*generate_oop_map=*/ true, 3339 /*return_pc_adjustment=*/0, 3340 return_pc_location, save_vectors); 3341 3342 // The following is basically a call_VM. However, we need the precise 3343 // address of the call in order to generate an oopmap. Hence, we do all the 3344 // work ourselves. 3345 __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg); 3346 3347 // The return address must always be correct so that the frame constructor 3348 // never sees an invalid pc. 3349 3350 // Do the call 3351 __ call_VM_leaf(call_ptr, R16_thread); 3352 address calls_return_pc = __ last_calls_return_pc(); 3353 3354 // Set an oopmap for the call site. This oopmap will map all 3355 // oop-registers and debug-info registers as callee-saved. This 3356 // will allow deoptimization at this safepoint to find all possible 3357 // debug-info recordings, as well as let GC find all oops. 3358 oop_maps->add_gc_map(calls_return_pc - start, map); 3359 3360 Label noException; 3361 3362 // Clear the last Java frame. 3363 __ reset_last_Java_frame(); 3364 3365 BLOCK_COMMENT(" Check pending exception."); 3366 const Register pending_exception = R0; 3367 __ ld(pending_exception, thread_(pending_exception)); 3368 __ cmpdi(CR0, pending_exception, 0); 3369 __ beq(CR0, noException); 3370 3371 // Exception pending 3372 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3373 frame_size_in_bytes, 3374 /*restore_ctr=*/true, save_vectors); 3375 3376 BLOCK_COMMENT(" Jump to forward_exception_entry."); 3377 // Jump to forward_exception_entry, with the issuing PC in LR 3378 // so it looks like the original nmethod called forward_exception_entry. 3379 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3380 3381 // No exception case. 3382 __ BIND(noException); 3383 3384 if (!cause_return) { 3385 Label no_adjust; 3386 // If our stashed return pc was modified by the runtime we avoid touching it 3387 __ ld(R0, frame_size_in_bytes + _abi0(lr), R1_SP); 3388 __ cmpd(CR0, R0, R31); 3389 __ bne(CR0, no_adjust); 3390 3391 // Adjust return pc forward to step over the safepoint poll instruction 3392 __ addi(R31, R31, 4); 3393 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP); 3394 3395 __ bind(no_adjust); 3396 } 3397 3398 // Normal exit, restore registers and exit. 3399 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3400 frame_size_in_bytes, 3401 /*restore_ctr=*/true, save_vectors); 3402 3403 __ blr(); 3404 3405 // Make sure all code is generated 3406 masm->flush(); 3407 3408 // Fill-out other meta info 3409 // CodeBlob frame size is in words. 3410 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize); 3411 } 3412 3413 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss) 3414 // 3415 // Generate a stub that calls into the vm to find out the proper destination 3416 // of a java call. All the argument registers are live at this point 3417 // but since this is generic code we don't know what they are and the caller 3418 // must do any gc of the args. 
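// Rough flow of the stub generated below (informal summary): save all live
// registers, call 'destination' in the VM and record an oopmap at the call's
// return pc; if no exception is pending, move the resolved code entry
// (R3_RET) into CTR, restore the registers, load the resolved Method* into
// R19_method and bctr to the callee; otherwise restore the registers and
// branch to StubRoutines::forward_exception_entry().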
3419 // 3420 RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) { 3421 assert(is_resolve_id(id), "expected a resolve stub id"); 3422 3423 // allocate space for the code 3424 ResourceMark rm; 3425 3426 const char* name = SharedRuntime::stub_name(id); 3427 CodeBuffer buffer(name, 1000, 512); 3428 MacroAssembler* masm = new MacroAssembler(&buffer); 3429 3430 int frame_size_in_bytes; 3431 3432 OopMapSet *oop_maps = new OopMapSet(); 3433 OopMap* map = nullptr; 3434 3435 address start = __ pc(); 3436 3437 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3438 &frame_size_in_bytes, 3439 /*generate_oop_map*/ true, 3440 /*return_pc_adjustment*/ 0, 3441 RegisterSaver::return_pc_is_lr); 3442 3443 // Use noreg as last_Java_pc, the return pc will be reconstructed 3444 // from the physical frame. 3445 __ set_last_Java_frame(/*sp*/R1_SP, noreg); 3446 3447 int frame_complete = __ offset(); 3448 3449 // Pass R19_method as 2nd (optional) argument, used by 3450 // counter_overflow_stub. 3451 __ call_VM_leaf(destination, R16_thread, R19_method); 3452 address calls_return_pc = __ last_calls_return_pc(); 3453 // Set an oopmap for the call site. 3454 // We need this not only for callee-saved registers, but also for volatile 3455 // registers that the compiler might be keeping live across a safepoint. 3456 // Create the oopmap for the call's return pc. 3457 oop_maps->add_gc_map(calls_return_pc - start, map); 3458 3459 // R3_RET contains the address we are going to jump to assuming no exception got installed. 3460 3461 // clear last_Java_sp 3462 __ reset_last_Java_frame(); 3463 3464 // Check for pending exceptions. 3465 BLOCK_COMMENT("Check for pending exceptions."); 3466 Label pending; 3467 __ ld(R11_scratch1, thread_(pending_exception)); 3468 __ cmpdi(CR0, R11_scratch1, 0); 3469 __ bne(CR0, pending); 3470 3471 __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame. 3472 3473 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false); 3474 3475 // Get the returned method. 3476 __ get_vm_result_metadata(R19_method); 3477 3478 __ bctr(); 3479 3480 3481 // Pending exception after the safepoint. 3482 __ BIND(pending); 3483 3484 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true); 3485 3486 // exception pending => remove activation and forward to exception handler 3487 3488 __ li(R11_scratch1, 0); 3489 __ ld(R3_ARG1, thread_(pending_exception)); 3490 __ std(R11_scratch1, in_bytes(JavaThread::vm_result_oop_offset()), R16_thread); 3491 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3492 3493 // ------------- 3494 // Make sure all code is generated. 3495 masm->flush(); 3496 3497 // return the blob 3498 // frame_size_words or bytes?? 3499 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize, 3500 oop_maps, true); 3501 } 3502 3503 // Continuation point for throwing of implicit exceptions that are 3504 // not handled in the current activation. Fabricates an exception 3505 // oop and initiates normal exception dispatching in this 3506 // frame. Only callee-saved registers are preserved (through the 3507 // normal register window / RegisterMap handling). 
If the compiler 3508 // needs all registers to be preserved between the fault point and 3509 // the exception handler then it must assume responsibility for that 3510 // in AbstractCompiler::continuation_for_implicit_null_exception or 3511 // continuation_for_implicit_division_by_zero_exception. All other 3512 // implicit exceptions (e.g., NullPointerException or 3513 // AbstractMethodError on entry) are either at call sites or 3514 // otherwise assume that stack unwinding will be initiated, so 3515 // caller saved registers were assumed volatile in the compiler. 3516 // 3517 // Note that we generate only this stub into a RuntimeStub, because 3518 // it needs to be properly traversed and ignored during GC, so we 3519 // change the meaning of the "__" macro within this method. 3520 // 3521 // Note: the routine set_pc_not_at_call_for_caller in 3522 // SharedRuntime.cpp requires that this code be generated into a 3523 // RuntimeStub. 3524 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) { 3525 assert(is_throw_id(id), "expected a throw stub id"); 3526 3527 const char* name = SharedRuntime::stub_name(id); 3528 3529 ResourceMark rm; 3530 const char* timer_msg = "SharedRuntime generate_throw_exception"; 3531 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); 3532 3533 CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0); 3534 MacroAssembler* masm = new MacroAssembler(&code); 3535 3536 OopMapSet* oop_maps = new OopMapSet(); 3537 int frame_size_in_bytes = frame::native_abi_reg_args_size; 3538 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 3539 3540 address start = __ pc(); 3541 3542 __ save_LR(R11_scratch1); 3543 3544 // Push a frame. 3545 __ push_frame_reg_args(0, R11_scratch1); 3546 3547 address frame_complete_pc = __ pc(); 3548 3549 // Note that we always have a runtime stub frame on the top of 3550 // stack by this point. Remember the offset of the instruction 3551 // whose address will be moved to R11_scratch1. 3552 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); 3553 3554 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 3555 3556 __ mr(R3_ARG1, R16_thread); 3557 __ call_c(runtime_entry); 3558 3559 // Set an oopmap for the call site. 3560 oop_maps->add_gc_map((int)(gc_map_pc - start), map); 3561 3562 __ reset_last_Java_frame(); 3563 3564 #ifdef ASSERT 3565 // Make sure that this code is only executed if there is a pending 3566 // exception. 3567 { 3568 Label L; 3569 __ ld(R0, 3570 in_bytes(Thread::pending_exception_offset()), 3571 R16_thread); 3572 __ cmpdi(CR0, R0, 0); 3573 __ bne(CR0, L); 3574 __ stop("SharedRuntime::throw_exception: no pending exception"); 3575 __ bind(L); 3576 } 3577 #endif 3578 3579 // Pop frame. 3580 __ pop_frame(); 3581 3582 __ restore_LR(R11_scratch1); 3583 3584 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry()); 3585 __ mtctr(R11_scratch1); 3586 __ bctr(); 3587 3588 // Create runtime stub with OopMap. 3589 RuntimeStub* stub = 3590 RuntimeStub::new_runtime_stub(name, &code, 3591 /*frame_complete=*/ (int)(frame_complete_pc - start), 3592 frame_size_in_bytes/wordSize, 3593 oop_maps, 3594 false); 3595 return stub; 3596 } 3597 3598 //------------------------------Montgomery multiplication------------------------ 3599 // 3600 3601 // Subtract 0:b from carry:a. Return carry. 
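// Illustrative only (comment, not compiled): the inline assembly below
// performs the multi-word subtraction a -= b and returns the adjusted carry
// word. A rough C equivalent, assuming GCC's unsigned __int128 extension:
//
//   unsigned long sub_sketch(unsigned long a[], unsigned long b[],
//                            unsigned long carry, long len) {
//     unsigned long borrow = 0;
//     for (long i = 0; i < len; i++) {
//       unsigned __int128 d = (unsigned __int128)a[i] - b[i] - borrow;
//       a[i] = (unsigned long)d;
//       borrow = (unsigned long)(d >> 64) & 1;  // 1 iff the subtraction wrapped
//     }
//     return carry - borrow;                    // matches "addme %[tmp], %[carry]"
//   }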
3602 static unsigned long 3603 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) { 3604 long i = 0; 3605 unsigned long tmp, tmp2; 3606 __asm__ __volatile__ ( 3607 "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA 3608 "mtctr %[len] \n" 3609 "0: \n" 3610 "ldx %[tmp], %[i], %[a] \n" 3611 "ldx %[tmp2], %[i], %[b] \n" 3612 "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended 3613 "stdx %[tmp], %[i], %[a] \n" 3614 "addi %[i], %[i], 8 \n" 3615 "bdnz 0b \n" 3616 "addme %[tmp], %[carry] \n" // carry + CA - 1 3617 : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2) 3618 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len) 3619 : "ctr", "xer", "memory" 3620 ); 3621 return tmp; 3622 } 3623 3624 // Multiply (unsigned) Long A by Long B, accumulating the double- 3625 // length result into the accumulator formed of T0, T1, and T2. 3626 inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3627 unsigned long hi, lo; 3628 __asm__ __volatile__ ( 3629 "mulld %[lo], %[A], %[B] \n" 3630 "mulhdu %[hi], %[A], %[B] \n" 3631 "addc %[T0], %[T0], %[lo] \n" 3632 "adde %[T1], %[T1], %[hi] \n" 3633 "addze %[T2], %[T2] \n" 3634 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3635 : [A]"r"(A), [B]"r"(B) 3636 : "xer" 3637 ); 3638 } 3639 3640 // As above, but add twice the double-length result into the 3641 // accumulator. 3642 inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3643 unsigned long hi, lo; 3644 __asm__ __volatile__ ( 3645 "mulld %[lo], %[A], %[B] \n" 3646 "mulhdu %[hi], %[A], %[B] \n" 3647 "addc %[T0], %[T0], %[lo] \n" 3648 "adde %[T1], %[T1], %[hi] \n" 3649 "addze %[T2], %[T2] \n" 3650 "addc %[T0], %[T0], %[lo] \n" 3651 "adde %[T1], %[T1], %[hi] \n" 3652 "addze %[T2], %[T2] \n" 3653 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3654 : [A]"r"(A), [B]"r"(B) 3655 : "xer" 3656 ); 3657 } 3658 3659 // Fast Montgomery multiplication. The derivation of the algorithm is 3660 // in "A Cryptographic Library for the Motorola DSP56000, 3661 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237". 3662 static void 3663 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[], 3664 unsigned long m[], unsigned long inv, int len) { 3665 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 3666 int i; 3667 3668 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 3669 3670 for (i = 0; i < len; i++) { 3671 int j; 3672 for (j = 0; j < i; j++) { 3673 MACC(a[j], b[i-j], t0, t1, t2); 3674 MACC(m[j], n[i-j], t0, t1, t2); 3675 } 3676 MACC(a[i], b[0], t0, t1, t2); 3677 m[i] = t0 * inv; 3678 MACC(m[i], n[0], t0, t1, t2); 3679 3680 assert(t0 == 0, "broken Montgomery multiply"); 3681 3682 t0 = t1; t1 = t2; t2 = 0; 3683 } 3684 3685 for (i = len; i < 2*len; i++) { 3686 int j; 3687 for (j = i-len+1; j < len; j++) { 3688 MACC(a[j], b[i-j], t0, t1, t2); 3689 MACC(m[j], n[i-j], t0, t1, t2); 3690 } 3691 m[i-len] = t0; 3692 t0 = t1; t1 = t2; t2 = 0; 3693 } 3694 3695 while (t0) { 3696 t0 = sub(m, n, t0, len); 3697 } 3698 } 3699 3700 // Fast Montgomery squaring. This uses asymptotically 25% fewer 3701 // multiplies so it should be up to 25% faster than Montgomery 3702 // multiplication. However, its loop control is more complex and it 3703 // may actually run slower on some machines. 
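// Where the saving comes from: when squaring, the off-diagonal products are
// symmetric (a[j]*a[i-j] == a[i-j]*a[j]), so each is computed once and added
// twice via MACC2; only the diagonal terms a[j]*a[j] need a separate multiply.
// For example, column i == 3 needs the two multiplies a[0]*a[3] and a[1]*a[2]
// instead of the four products a[0]*b[3], a[1]*b[2], a[2]*b[1], a[3]*b[0]
// required by the general multiply. The m*n reduction half is unchanged,
// which is roughly where the "25% fewer multiplies" figure comes from.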
3704 static void 3705 montgomery_square(unsigned long a[], unsigned long n[], 3706 unsigned long m[], unsigned long inv, int len) { 3707 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 3708 int i; 3709 3710 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 3711 3712 for (i = 0; i < len; i++) { 3713 int j; 3714 int end = (i+1)/2; 3715 for (j = 0; j < end; j++) { 3716 MACC2(a[j], a[i-j], t0, t1, t2); 3717 MACC(m[j], n[i-j], t0, t1, t2); 3718 } 3719 if ((i & 1) == 0) { 3720 MACC(a[j], a[j], t0, t1, t2); 3721 } 3722 for (; j < i; j++) { 3723 MACC(m[j], n[i-j], t0, t1, t2); 3724 } 3725 m[i] = t0 * inv; 3726 MACC(m[i], n[0], t0, t1, t2); 3727 3728 assert(t0 == 0, "broken Montgomery square"); 3729 3730 t0 = t1; t1 = t2; t2 = 0; 3731 } 3732 3733 for (i = len; i < 2*len; i++) { 3734 int start = i-len+1; 3735 int end = start + (len - start)/2; 3736 int j; 3737 for (j = start; j < end; j++) { 3738 MACC2(a[j], a[i-j], t0, t1, t2); 3739 MACC(m[j], n[i-j], t0, t1, t2); 3740 } 3741 if ((i & 1) == 0) { 3742 MACC(a[j], a[j], t0, t1, t2); 3743 } 3744 for (; j < len; j++) { 3745 MACC(m[j], n[i-j], t0, t1, t2); 3746 } 3747 m[i-len] = t0; 3748 t0 = t1; t1 = t2; t2 = 0; 3749 } 3750 3751 while (t0) { 3752 t0 = sub(m, n, t0, len); 3753 } 3754 } 3755 3756 // The threshold at which squaring is advantageous was determined 3757 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz. 3758 // Doesn't seem to be relevant for Power8 so we use the same value. 3759 #define MONTGOMERY_SQUARING_THRESHOLD 64 3760 3761 // Copy len longwords from s to d, word-swapping as we go. The 3762 // destination array is reversed. 3763 static void reverse_words(unsigned long *s, unsigned long *d, int len) { 3764 d += len; 3765 while(len-- > 0) { 3766 d--; 3767 unsigned long s_val = *s; 3768 // Swap words in a longword on little endian machines. 3769 #ifdef VM_LITTLE_ENDIAN 3770 s_val = (s_val << 32) | (s_val >> 32); 3771 #endif 3772 *d = s_val; 3773 s++; 3774 } 3775 } 3776 3777 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints, 3778 jint len, jlong inv, 3779 jint *m_ints) { 3780 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls. 3781 assert(len % 2 == 0, "array length in montgomery_multiply must be even"); 3782 int longwords = len/2; 3783 3784 // Make very sure we don't use so much space that the stack might 3785 // overflow. 512 jints corresponds to an 16384-bit integer and 3786 // will use here a total of 8k bytes of stack space. 3787 int divisor = sizeof(unsigned long) * 4; 3788 guarantee(longwords <= 8192 / divisor, "must be"); 3789 int total_allocation = longwords * sizeof (unsigned long) * 4; 3790 unsigned long *scratch = (unsigned long *)alloca(total_allocation); 3791 3792 // Local scratch arrays 3793 unsigned long 3794 *a = scratch + 0 * longwords, 3795 *b = scratch + 1 * longwords, 3796 *n = scratch + 2 * longwords, 3797 *m = scratch + 3 * longwords; 3798 3799 reverse_words((unsigned long *)a_ints, a, longwords); 3800 reverse_words((unsigned long *)b_ints, b, longwords); 3801 reverse_words((unsigned long *)n_ints, n, longwords); 3802 3803 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords); 3804 3805 reverse_words(m, (unsigned long *)m_ints, longwords); 3806 } 3807 3808 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints, 3809 jint len, jlong inv, 3810 jint *m_ints) { 3811 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls. 
3812 assert(len % 2 == 0, "array length in montgomery_square must be even"); 3813 int longwords = len/2; 3814 3815 // Make very sure we don't use so much space that the stack might 3816 // overflow. 512 jints corresponds to an 16384-bit integer and 3817 // will use here a total of 6k bytes of stack space. 3818 int divisor = sizeof(unsigned long) * 3; 3819 guarantee(longwords <= (8192 / divisor), "must be"); 3820 int total_allocation = longwords * sizeof (unsigned long) * 3; 3821 unsigned long *scratch = (unsigned long *)alloca(total_allocation); 3822 3823 // Local scratch arrays 3824 unsigned long 3825 *a = scratch + 0 * longwords, 3826 *n = scratch + 1 * longwords, 3827 *m = scratch + 2 * longwords; 3828 3829 reverse_words((unsigned long *)a_ints, a, longwords); 3830 reverse_words((unsigned long *)n_ints, n, longwords); 3831 3832 if (len >= MONTGOMERY_SQUARING_THRESHOLD) { 3833 ::montgomery_square(a, n, m, (unsigned long)inv, longwords); 3834 } else { 3835 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords); 3836 } 3837 3838 reverse_words(m, (unsigned long *)m_ints, longwords); 3839 } 3840 3841 #if INCLUDE_JFR 3842 3843 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint. 3844 // It returns a jobject handle to the event writer. 3845 // The handle is dereferenced and the return value is the event writer oop. 3846 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { 3847 const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id); 3848 CodeBuffer code(name, 512, 64); 3849 MacroAssembler* masm = new MacroAssembler(&code); 3850 3851 Register tmp1 = R10_ARG8; 3852 Register tmp2 = R9_ARG7; 3853 3854 int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size; 3855 address start = __ pc(); 3856 __ mflr(tmp1); 3857 __ std(tmp1, _abi0(lr), R1_SP); // save return pc 3858 __ push_frame_reg_args(0, tmp1); 3859 int frame_complete = __ pc() - start; 3860 __ set_last_Java_frame(R1_SP, noreg); 3861 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), R16_thread); 3862 address calls_return_pc = __ last_calls_return_pc(); 3863 __ reset_last_Java_frame(); 3864 // The handle is dereferenced through a load barrier. 3865 __ resolve_global_jobject(R3_RET, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE); 3866 __ pop_frame(); 3867 __ ld(tmp1, _abi0(lr), R1_SP); 3868 __ mtlr(tmp1); 3869 __ blr(); 3870 3871 OopMapSet* oop_maps = new OopMapSet(); 3872 OopMap* map = new OopMap(framesize, 0); 3873 oop_maps->add_gc_map(calls_return_pc - start, map); 3874 3875 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) 3876 RuntimeStub::new_runtime_stub(name, &code, frame_complete, 3877 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 3878 oop_maps, false); 3879 return stub; 3880 } 3881 3882 // For c2: call to return a leased buffer. 
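// Structurally this mirrors generate_jfr_write_checkpoint() above; since the
// runtime call returns no handle, there is nothing to resolve before
// returning to the caller.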
3883 RuntimeStub* SharedRuntime::generate_jfr_return_lease() { 3884 const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id); 3885 CodeBuffer code(name, 512, 64); 3886 MacroAssembler* masm = new MacroAssembler(&code); 3887 3888 Register tmp1 = R10_ARG8; 3889 Register tmp2 = R9_ARG7; 3890 3891 int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size; 3892 address start = __ pc(); 3893 __ mflr(tmp1); 3894 __ std(tmp1, _abi0(lr), R1_SP); // save return pc 3895 __ push_frame_reg_args(0, tmp1); 3896 int frame_complete = __ pc() - start; 3897 __ set_last_Java_frame(R1_SP, noreg); 3898 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), R16_thread); 3899 address calls_return_pc = __ last_calls_return_pc(); 3900 __ reset_last_Java_frame(); 3901 __ pop_frame(); 3902 __ ld(tmp1, _abi0(lr), R1_SP); 3903 __ mtlr(tmp1); 3904 __ blr(); 3905 3906 OopMapSet* oop_maps = new OopMapSet(); 3907 OopMap* map = new OopMap(framesize, 0); 3908 oop_maps->add_gc_map(calls_return_pc - start, map); 3909 3910 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) 3911 RuntimeStub::new_runtime_stub(name, &code, frame_complete, 3912 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 3913 oop_maps, false); 3914 return stub; 3915 } 3916 3917 #endif // INCLUDE_JFR