1 /* 2 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2012, 2024 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "code/debugInfoRec.hpp" 29 #include "code/compiledIC.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "frame_ppc.hpp" 32 #include "compiler/oopMap.hpp" 33 #include "gc/shared/gcLocker.hpp" 34 #include "interpreter/interpreter.hpp" 35 #include "interpreter/interp_masm.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/klass.inline.hpp" 38 #include "prims/methodHandles.hpp" 39 #include "runtime/continuation.hpp" 40 #include "runtime/continuationEntry.inline.hpp" 41 #include "runtime/jniHandles.hpp" 42 #include "runtime/os.inline.hpp" 43 #include "runtime/safepointMechanism.hpp" 44 #include "runtime/sharedRuntime.hpp" 45 #include "runtime/signature.hpp" 46 #include "runtime/stubRoutines.hpp" 47 #include "runtime/timerTrace.hpp" 48 #include "runtime/vframeArray.hpp" 49 #include "utilities/align.hpp" 50 #include "utilities/macros.hpp" 51 #include "vmreg_ppc.inline.hpp" 52 #ifdef COMPILER1 53 #include "c1/c1_Runtime1.hpp" 54 #endif 55 #ifdef COMPILER2 56 #include "opto/ad.hpp" 57 #include "opto/runtime.hpp" 58 #endif 59 60 #include <alloca.h> 61 62 #define __ masm-> 63 64 #ifdef PRODUCT 65 #define BLOCK_COMMENT(str) // nothing 66 #else 67 #define BLOCK_COMMENT(str) __ block_comment(str) 68 #endif 69 70 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 71 72 73 class RegisterSaver { 74 // Used for saving volatile registers. 75 public: 76 77 // Support different return pc locations. 
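  // Where the saver finds the return pc that it stores into the new frame:
  //   return_pc_is_lr                        - read it from LR (mflr),
  //   return_pc_is_pre_saved                 - the caller has already stored it in the ABI slot,
  //   return_pc_is_thread_saved_exception_pc - load it from the thread's saved_exception_pc.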
78 enum ReturnPCLocation { 79 return_pc_is_lr, 80 return_pc_is_pre_saved, 81 return_pc_is_thread_saved_exception_pc 82 }; 83 84 static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm, 85 int* out_frame_size_in_bytes, 86 bool generate_oop_map, 87 int return_pc_adjustment, 88 ReturnPCLocation return_pc_location, 89 bool save_vectors = false); 90 static void restore_live_registers_and_pop_frame(MacroAssembler* masm, 91 int frame_size_in_bytes, 92 bool restore_ctr, 93 bool save_vectors = false); 94 95 static void push_frame_and_save_argument_registers(MacroAssembler* masm, 96 Register r_temp, 97 int frame_size, 98 int total_args, 99 const VMRegPair *regs, const VMRegPair *regs2 = nullptr); 100 static void restore_argument_registers_and_pop_frame(MacroAssembler*masm, 101 int frame_size, 102 int total_args, 103 const VMRegPair *regs, const VMRegPair *regs2 = nullptr); 104 105 // During deoptimization only the result registers need to be restored 106 // all the other values have already been extracted. 107 static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes); 108 109 // Constants and data structures: 110 111 typedef enum { 112 int_reg, 113 float_reg, 114 special_reg, 115 vs_reg 116 } RegisterType; 117 118 typedef enum { 119 reg_size = 8, 120 half_reg_size = reg_size / 2, 121 vs_reg_size = 16 122 } RegisterConstants; 123 124 typedef struct { 125 RegisterType reg_type; 126 int reg_num; 127 VMReg vmreg; 128 } LiveRegType; 129 }; 130 131 132 #define RegisterSaver_LiveIntReg(regname) \ 133 { RegisterSaver::int_reg, regname->encoding(), regname->as_VMReg() } 134 135 #define RegisterSaver_LiveFloatReg(regname) \ 136 { RegisterSaver::float_reg, regname->encoding(), regname->as_VMReg() } 137 138 #define RegisterSaver_LiveSpecialReg(regname) \ 139 { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() } 140 141 #define RegisterSaver_LiveVSReg(regname) \ 142 { RegisterSaver::vs_reg, regname->encoding(), regname->as_VMReg() } 143 144 static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = { 145 // Live registers which get spilled to the stack. Register 146 // positions in this array correspond directly to the stack layout. 
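  // Layout of this table, from lowest to highest save-area offset:
  //   CTR, F0..F31, R0, R2..R12, R14..R31.
  // R1 (the stack pointer) and R13 (system thread id) are deliberately omitted.
  // R31 must stay the last entry: the save/restore code below spills R30/R31
  // into the last two (non-vector) slots up front and uses them as scratch
  // registers while the remaining registers are processed.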
147 148 // 149 // live special registers: 150 // 151 RegisterSaver_LiveSpecialReg(SR_CTR), 152 // 153 // live float registers: 154 // 155 RegisterSaver_LiveFloatReg( F0 ), 156 RegisterSaver_LiveFloatReg( F1 ), 157 RegisterSaver_LiveFloatReg( F2 ), 158 RegisterSaver_LiveFloatReg( F3 ), 159 RegisterSaver_LiveFloatReg( F4 ), 160 RegisterSaver_LiveFloatReg( F5 ), 161 RegisterSaver_LiveFloatReg( F6 ), 162 RegisterSaver_LiveFloatReg( F7 ), 163 RegisterSaver_LiveFloatReg( F8 ), 164 RegisterSaver_LiveFloatReg( F9 ), 165 RegisterSaver_LiveFloatReg( F10 ), 166 RegisterSaver_LiveFloatReg( F11 ), 167 RegisterSaver_LiveFloatReg( F12 ), 168 RegisterSaver_LiveFloatReg( F13 ), 169 RegisterSaver_LiveFloatReg( F14 ), 170 RegisterSaver_LiveFloatReg( F15 ), 171 RegisterSaver_LiveFloatReg( F16 ), 172 RegisterSaver_LiveFloatReg( F17 ), 173 RegisterSaver_LiveFloatReg( F18 ), 174 RegisterSaver_LiveFloatReg( F19 ), 175 RegisterSaver_LiveFloatReg( F20 ), 176 RegisterSaver_LiveFloatReg( F21 ), 177 RegisterSaver_LiveFloatReg( F22 ), 178 RegisterSaver_LiveFloatReg( F23 ), 179 RegisterSaver_LiveFloatReg( F24 ), 180 RegisterSaver_LiveFloatReg( F25 ), 181 RegisterSaver_LiveFloatReg( F26 ), 182 RegisterSaver_LiveFloatReg( F27 ), 183 RegisterSaver_LiveFloatReg( F28 ), 184 RegisterSaver_LiveFloatReg( F29 ), 185 RegisterSaver_LiveFloatReg( F30 ), 186 RegisterSaver_LiveFloatReg( F31 ), 187 // 188 // live integer registers: 189 // 190 RegisterSaver_LiveIntReg( R0 ), 191 //RegisterSaver_LiveIntReg( R1 ), // stack pointer 192 RegisterSaver_LiveIntReg( R2 ), 193 RegisterSaver_LiveIntReg( R3 ), 194 RegisterSaver_LiveIntReg( R4 ), 195 RegisterSaver_LiveIntReg( R5 ), 196 RegisterSaver_LiveIntReg( R6 ), 197 RegisterSaver_LiveIntReg( R7 ), 198 RegisterSaver_LiveIntReg( R8 ), 199 RegisterSaver_LiveIntReg( R9 ), 200 RegisterSaver_LiveIntReg( R10 ), 201 RegisterSaver_LiveIntReg( R11 ), 202 RegisterSaver_LiveIntReg( R12 ), 203 //RegisterSaver_LiveIntReg( R13 ), // system thread id 204 RegisterSaver_LiveIntReg( R14 ), 205 RegisterSaver_LiveIntReg( R15 ), 206 RegisterSaver_LiveIntReg( R16 ), 207 RegisterSaver_LiveIntReg( R17 ), 208 RegisterSaver_LiveIntReg( R18 ), 209 RegisterSaver_LiveIntReg( R19 ), 210 RegisterSaver_LiveIntReg( R20 ), 211 RegisterSaver_LiveIntReg( R21 ), 212 RegisterSaver_LiveIntReg( R22 ), 213 RegisterSaver_LiveIntReg( R23 ), 214 RegisterSaver_LiveIntReg( R24 ), 215 RegisterSaver_LiveIntReg( R25 ), 216 RegisterSaver_LiveIntReg( R26 ), 217 RegisterSaver_LiveIntReg( R27 ), 218 RegisterSaver_LiveIntReg( R28 ), 219 RegisterSaver_LiveIntReg( R29 ), 220 RegisterSaver_LiveIntReg( R30 ), 221 RegisterSaver_LiveIntReg( R31 ) // must be the last register (see save/restore functions below) 222 }; 223 224 static const RegisterSaver::LiveRegType RegisterSaver_LiveVSRegs[] = { 225 // 226 // live vector scalar registers (optional, only these ones are used by C2): 227 // 228 RegisterSaver_LiveVSReg( VSR32 ), 229 RegisterSaver_LiveVSReg( VSR33 ), 230 RegisterSaver_LiveVSReg( VSR34 ), 231 RegisterSaver_LiveVSReg( VSR35 ), 232 RegisterSaver_LiveVSReg( VSR36 ), 233 RegisterSaver_LiveVSReg( VSR37 ), 234 RegisterSaver_LiveVSReg( VSR38 ), 235 RegisterSaver_LiveVSReg( VSR39 ), 236 RegisterSaver_LiveVSReg( VSR40 ), 237 RegisterSaver_LiveVSReg( VSR41 ), 238 RegisterSaver_LiveVSReg( VSR42 ), 239 RegisterSaver_LiveVSReg( VSR43 ), 240 RegisterSaver_LiveVSReg( VSR44 ), 241 RegisterSaver_LiveVSReg( VSR45 ), 242 RegisterSaver_LiveVSReg( VSR46 ), 243 RegisterSaver_LiveVSReg( VSR47 ), 244 RegisterSaver_LiveVSReg( VSR48 ), 245 RegisterSaver_LiveVSReg( 
VSR49 ), 246 RegisterSaver_LiveVSReg( VSR50 ), 247 RegisterSaver_LiveVSReg( VSR51 ) 248 }; 249 250 251 OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm, 252 int* out_frame_size_in_bytes, 253 bool generate_oop_map, 254 int return_pc_adjustment, 255 ReturnPCLocation return_pc_location, 256 bool save_vectors) { 257 // Push an abi_reg_args-frame and store all registers which may be live. 258 // If requested, create an OopMap: Record volatile registers as 259 // callee-save values in an OopMap so their save locations will be 260 // propagated to the RegisterMap of the caller frame during 261 // StackFrameStream construction (needed for deoptimization; see 262 // compiledVFrame::create_stack_value). 263 // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment. 264 // Updated return pc is returned in R31 (if not return_pc_is_pre_saved). 265 266 // calculate frame size 267 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / 268 sizeof(RegisterSaver::LiveRegType); 269 const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) / 270 sizeof(RegisterSaver::LiveRegType)) 271 : 0; 272 const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size; 273 const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes) 274 + frame::native_abi_reg_args_size; 275 276 *out_frame_size_in_bytes = frame_size_in_bytes; 277 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 278 const int register_save_offset = frame_size_in_bytes - register_save_size; 279 280 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words. 281 OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : nullptr; 282 283 BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {"); 284 285 // push a new frame 286 __ push_frame(frame_size_in_bytes, noreg); 287 288 // Save some registers in the last (non-vector) slots of the new frame so we 289 // can use them as scratch regs or to determine the return pc. 290 __ std(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP); 291 __ std(R30, frame_size_in_bytes - 2*reg_size - vsregstosave_num * vs_reg_size, R1_SP); 292 293 // save the flags 294 // Do the save_LR by hand and adjust the return pc if requested. 295 switch (return_pc_location) { 296 case return_pc_is_lr: __ mflr(R31); break; 297 case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break; 298 case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break; 299 default: ShouldNotReachHere(); 300 } 301 if (return_pc_location != return_pc_is_pre_saved) { 302 if (return_pc_adjustment != 0) { 303 __ addi(R31, R31, return_pc_adjustment); 304 } 305 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP); 306 } 307 308 // save all registers (ints and floats) 309 int offset = register_save_offset; 310 311 for (int i = 0; i < regstosave_num; i++) { 312 int reg_num = RegisterSaver_LiveRegs[i].reg_num; 313 int reg_type = RegisterSaver_LiveRegs[i].reg_type; 314 315 switch (reg_type) { 316 case RegisterSaver::int_reg: { 317 if (reg_num < 30) { // We spilled R30-31 right at the beginning. 
318 __ std(as_Register(reg_num), offset, R1_SP); 319 } 320 break; 321 } 322 case RegisterSaver::float_reg: { 323 __ stfd(as_FloatRegister(reg_num), offset, R1_SP); 324 break; 325 } 326 case RegisterSaver::special_reg: { 327 if (reg_num == SR_CTR.encoding()) { 328 __ mfctr(R30); 329 __ std(R30, offset, R1_SP); 330 } else { 331 Unimplemented(); 332 } 333 break; 334 } 335 default: 336 ShouldNotReachHere(); 337 } 338 339 if (generate_oop_map) { 340 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), 341 RegisterSaver_LiveRegs[i].vmreg); 342 map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2), 343 RegisterSaver_LiveRegs[i].vmreg->next()); 344 } 345 offset += reg_size; 346 } 347 348 for (int i = 0; i < vsregstosave_num; i++) { 349 int reg_num = RegisterSaver_LiveVSRegs[i].reg_num; 350 int reg_type = RegisterSaver_LiveVSRegs[i].reg_type; 351 352 __ li(R30, offset); 353 __ stxvd2x(as_VectorSRegister(reg_num), R30, R1_SP); 354 355 if (generate_oop_map) { 356 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), 357 RegisterSaver_LiveVSRegs[i].vmreg); 358 } 359 offset += vs_reg_size; 360 } 361 362 assert(offset == frame_size_in_bytes, "consistency check"); 363 364 BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers"); 365 366 // And we're done. 367 return map; 368 } 369 370 371 // Pop the current frame and restore all the registers that we 372 // saved. 373 void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm, 374 int frame_size_in_bytes, 375 bool restore_ctr, 376 bool save_vectors) { 377 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / 378 sizeof(RegisterSaver::LiveRegType); 379 const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) / 380 sizeof(RegisterSaver::LiveRegType)) 381 : 0; 382 const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size; 383 384 const int register_save_offset = frame_size_in_bytes - register_save_size; 385 386 BLOCK_COMMENT("restore_live_registers_and_pop_frame {"); 387 388 // restore all registers (ints and floats) 389 int offset = register_save_offset; 390 391 for (int i = 0; i < regstosave_num; i++) { 392 int reg_num = RegisterSaver_LiveRegs[i].reg_num; 393 int reg_type = RegisterSaver_LiveRegs[i].reg_type; 394 395 switch (reg_type) { 396 case RegisterSaver::int_reg: { 397 if (reg_num != 31) // R31 restored at the end, it's the tmp reg! 398 __ ld(as_Register(reg_num), offset, R1_SP); 399 break; 400 } 401 case RegisterSaver::float_reg: { 402 __ lfd(as_FloatRegister(reg_num), offset, R1_SP); 403 break; 404 } 405 case RegisterSaver::special_reg: { 406 if (reg_num == SR_CTR.encoding()) { 407 if (restore_ctr) { // Nothing to do here if ctr already contains the next address. 
408 __ ld(R31, offset, R1_SP); 409 __ mtctr(R31); 410 } 411 } else { 412 Unimplemented(); 413 } 414 break; 415 } 416 default: 417 ShouldNotReachHere(); 418 } 419 offset += reg_size; 420 } 421 422 for (int i = 0; i < vsregstosave_num; i++) { 423 int reg_num = RegisterSaver_LiveVSRegs[i].reg_num; 424 int reg_type = RegisterSaver_LiveVSRegs[i].reg_type; 425 426 __ li(R31, offset); 427 __ lxvd2x(as_VectorSRegister(reg_num), R31, R1_SP); 428 429 offset += vs_reg_size; 430 } 431 432 assert(offset == frame_size_in_bytes, "consistency check"); 433 434 // restore link and the flags 435 __ ld(R31, frame_size_in_bytes + _abi0(lr), R1_SP); 436 __ mtlr(R31); 437 438 // restore scratch register's value 439 __ ld(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP); 440 441 // pop the frame 442 __ addi(R1_SP, R1_SP, frame_size_in_bytes); 443 444 BLOCK_COMMENT("} restore_live_registers_and_pop_frame"); 445 } 446 447 void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp, 448 int frame_size,int total_args, const VMRegPair *regs, 449 const VMRegPair *regs2) { 450 __ push_frame(frame_size, r_temp); 451 int st_off = frame_size - wordSize; 452 for (int i = 0; i < total_args; i++) { 453 VMReg r_1 = regs[i].first(); 454 VMReg r_2 = regs[i].second(); 455 if (!r_1->is_valid()) { 456 assert(!r_2->is_valid(), ""); 457 continue; 458 } 459 if (r_1->is_Register()) { 460 Register r = r_1->as_Register(); 461 __ std(r, st_off, R1_SP); 462 st_off -= wordSize; 463 } else if (r_1->is_FloatRegister()) { 464 FloatRegister f = r_1->as_FloatRegister(); 465 __ stfd(f, st_off, R1_SP); 466 st_off -= wordSize; 467 } 468 } 469 if (regs2 != nullptr) { 470 for (int i = 0; i < total_args; i++) { 471 VMReg r_1 = regs2[i].first(); 472 VMReg r_2 = regs2[i].second(); 473 if (!r_1->is_valid()) { 474 assert(!r_2->is_valid(), ""); 475 continue; 476 } 477 if (r_1->is_Register()) { 478 Register r = r_1->as_Register(); 479 __ std(r, st_off, R1_SP); 480 st_off -= wordSize; 481 } else if (r_1->is_FloatRegister()) { 482 FloatRegister f = r_1->as_FloatRegister(); 483 __ stfd(f, st_off, R1_SP); 484 st_off -= wordSize; 485 } 486 } 487 } 488 } 489 490 void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler*masm, int frame_size, 491 int total_args, const VMRegPair *regs, 492 const VMRegPair *regs2) { 493 int st_off = frame_size - wordSize; 494 for (int i = 0; i < total_args; i++) { 495 VMReg r_1 = regs[i].first(); 496 VMReg r_2 = regs[i].second(); 497 if (r_1->is_Register()) { 498 Register r = r_1->as_Register(); 499 __ ld(r, st_off, R1_SP); 500 st_off -= wordSize; 501 } else if (r_1->is_FloatRegister()) { 502 FloatRegister f = r_1->as_FloatRegister(); 503 __ lfd(f, st_off, R1_SP); 504 st_off -= wordSize; 505 } 506 } 507 if (regs2 != nullptr) 508 for (int i = 0; i < total_args; i++) { 509 VMReg r_1 = regs2[i].first(); 510 VMReg r_2 = regs2[i].second(); 511 if (r_1->is_Register()) { 512 Register r = r_1->as_Register(); 513 __ ld(r, st_off, R1_SP); 514 st_off -= wordSize; 515 } else if (r_1->is_FloatRegister()) { 516 FloatRegister f = r_1->as_FloatRegister(); 517 __ lfd(f, st_off, R1_SP); 518 st_off -= wordSize; 519 } 520 } 521 __ pop_frame(); 522 } 523 524 // Restore the registers that might be holding a result. 
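// Only R3_RET and F1_RET are reloaded below: during deoptimization every other
// saved value has already been extracted from the save area, so restoring the
// full register set here would be unnecessary (see the note on
// restore_result_registers in the RegisterSaver declaration above).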
525 void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) { 526 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / 527 sizeof(RegisterSaver::LiveRegType); 528 const int register_save_size = regstosave_num * reg_size; // VS registers not relevant here. 529 const int register_save_offset = frame_size_in_bytes - register_save_size; 530 531 // restore all result registers (ints and floats) 532 int offset = register_save_offset; 533 for (int i = 0; i < regstosave_num; i++) { 534 int reg_num = RegisterSaver_LiveRegs[i].reg_num; 535 int reg_type = RegisterSaver_LiveRegs[i].reg_type; 536 switch (reg_type) { 537 case RegisterSaver::int_reg: { 538 if (as_Register(reg_num)==R3_RET) // int result_reg 539 __ ld(as_Register(reg_num), offset, R1_SP); 540 break; 541 } 542 case RegisterSaver::float_reg: { 543 if (as_FloatRegister(reg_num)==F1_RET) // float result_reg 544 __ lfd(as_FloatRegister(reg_num), offset, R1_SP); 545 break; 546 } 547 case RegisterSaver::special_reg: { 548 // Special registers don't hold a result. 549 break; 550 } 551 default: 552 ShouldNotReachHere(); 553 } 554 offset += reg_size; 555 } 556 557 assert(offset == frame_size_in_bytes, "consistency check"); 558 } 559 560 // Is vector's size (in bytes) bigger than a size saved by default? 561 bool SharedRuntime::is_wide_vector(int size) { 562 // Note, MaxVectorSize == 8/16 on PPC64. 563 assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size); 564 return size > 8; 565 } 566 567 static int reg2slot(VMReg r) { 568 return r->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 569 } 570 571 static int reg2offset(VMReg r) { 572 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 573 } 574 575 // --------------------------------------------------------------------------- 576 // Read the array of BasicTypes from a signature, and compute where the 577 // arguments should go. Values in the VMRegPair regs array refer to 4-byte 578 // quantities. Values less than VMRegImpl::stack0 are registers, those above 579 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer 580 // as framesizes are fixed. 581 // VMRegImpl::stack0 refers to the first slot 0(sp). 582 // and VMRegImpl::stack0+1 refers to the memory word 4-bytes higher. Register 583 // up to Register::number_of_registers) are the 64-bit 584 // integer registers. 585 586 // Note: the INPUTS in sig_bt are in units of Java argument words, which are 587 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit 588 // units regardless of build. Of course for i486 there is no 64 bit build 589 590 // The Java calling convention is a "shifted" version of the C ABI. 591 // By skipping the first C ABI register we can call non-static jni methods 592 // with small numbers of arguments without having to shuffle the arguments 593 // at all. Since we control the java ABI we ought to at least get some 594 // advantage out of it. 
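// Illustrative example (not from the original sources) of the assignment made
// by java_calling_convention() below for a hypothetical signature
// (int, long, float, double, Object):
//   int    -> R3            long   -> R4 (plus a T_VOID half)
//   float  -> F1            double -> F2 (plus a T_VOID half)
//   Object -> R5
// Int/long/oop arguments consume R3..R10 in order, float/double arguments
// consume F1..F13 independently, and any overflow goes to 32-bit stack slots
// (longs, doubles and oops aligned to 2 slots).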
595 596 const VMReg java_iarg_reg[8] = { 597 R3->as_VMReg(), 598 R4->as_VMReg(), 599 R5->as_VMReg(), 600 R6->as_VMReg(), 601 R7->as_VMReg(), 602 R8->as_VMReg(), 603 R9->as_VMReg(), 604 R10->as_VMReg() 605 }; 606 607 const VMReg java_farg_reg[13] = { 608 F1->as_VMReg(), 609 F2->as_VMReg(), 610 F3->as_VMReg(), 611 F4->as_VMReg(), 612 F5->as_VMReg(), 613 F6->as_VMReg(), 614 F7->as_VMReg(), 615 F8->as_VMReg(), 616 F9->as_VMReg(), 617 F10->as_VMReg(), 618 F11->as_VMReg(), 619 F12->as_VMReg(), 620 F13->as_VMReg() 621 }; 622 623 const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]); 624 const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]); 625 626 STATIC_ASSERT(num_java_iarg_registers == Argument::n_int_register_parameters_j); 627 STATIC_ASSERT(num_java_farg_registers == Argument::n_float_register_parameters_j); 628 629 int SharedRuntime::java_calling_convention(const BasicType *sig_bt, 630 VMRegPair *regs, 631 int total_args_passed) { 632 // C2c calling conventions for compiled-compiled calls. 633 // Put 8 ints/longs into registers _AND_ 13 float/doubles into 634 // registers _AND_ put the rest on the stack. 635 636 const int inc_stk_for_intfloat = 1; // 1 slots for ints and floats 637 const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles 638 639 int i; 640 VMReg reg; 641 int stk = 0; 642 int ireg = 0; 643 int freg = 0; 644 645 // We put the first 8 arguments into registers and the rest on the 646 // stack, float arguments are already in their argument registers 647 // due to c2c calling conventions (see calling_convention). 648 for (int i = 0; i < total_args_passed; ++i) { 649 switch(sig_bt[i]) { 650 case T_BOOLEAN: 651 case T_CHAR: 652 case T_BYTE: 653 case T_SHORT: 654 case T_INT: 655 if (ireg < num_java_iarg_registers) { 656 // Put int/ptr in register 657 reg = java_iarg_reg[ireg]; 658 ++ireg; 659 } else { 660 // Put int/ptr on stack. 661 reg = VMRegImpl::stack2reg(stk); 662 stk += inc_stk_for_intfloat; 663 } 664 regs[i].set1(reg); 665 break; 666 case T_LONG: 667 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); 668 if (ireg < num_java_iarg_registers) { 669 // Put long in register. 670 reg = java_iarg_reg[ireg]; 671 ++ireg; 672 } else { 673 // Put long on stack. They must be aligned to 2 slots. 674 if (stk & 0x1) ++stk; 675 reg = VMRegImpl::stack2reg(stk); 676 stk += inc_stk_for_longdouble; 677 } 678 regs[i].set2(reg); 679 break; 680 case T_OBJECT: 681 case T_ARRAY: 682 case T_ADDRESS: 683 if (ireg < num_java_iarg_registers) { 684 // Put ptr in register. 685 reg = java_iarg_reg[ireg]; 686 ++ireg; 687 } else { 688 // Put ptr on stack. Objects must be aligned to 2 slots too, 689 // because "64-bit pointers record oop-ishness on 2 aligned 690 // adjacent registers." (see OopFlow::build_oop_map). 691 if (stk & 0x1) ++stk; 692 reg = VMRegImpl::stack2reg(stk); 693 stk += inc_stk_for_longdouble; 694 } 695 regs[i].set2(reg); 696 break; 697 case T_FLOAT: 698 if (freg < num_java_farg_registers) { 699 // Put float in register. 700 reg = java_farg_reg[freg]; 701 ++freg; 702 } else { 703 // Put float on stack. 704 reg = VMRegImpl::stack2reg(stk); 705 stk += inc_stk_for_intfloat; 706 } 707 regs[i].set1(reg); 708 break; 709 case T_DOUBLE: 710 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); 711 if (freg < num_java_farg_registers) { 712 // Put double in register. 713 reg = java_farg_reg[freg]; 714 ++freg; 715 } else { 716 // Put double on stack. 
They must be aligned to 2 slots. 717 if (stk & 0x1) ++stk; 718 reg = VMRegImpl::stack2reg(stk); 719 stk += inc_stk_for_longdouble; 720 } 721 regs[i].set2(reg); 722 break; 723 case T_VOID: 724 // Do not count halves. 725 regs[i].set_bad(); 726 break; 727 default: 728 ShouldNotReachHere(); 729 } 730 } 731 return stk; 732 } 733 734 #if defined(COMPILER1) || defined(COMPILER2) 735 // Calling convention for calling C code. 736 int SharedRuntime::c_calling_convention(const BasicType *sig_bt, 737 VMRegPair *regs, 738 int total_args_passed) { 739 // Calling conventions for C runtime calls and calls to JNI native methods. 740 // 741 // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8 742 // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist 743 // the first 13 flt/dbl's in the first 13 fp regs but additionally 744 // copy flt/dbl to the stack if they are beyond the 8th argument. 745 746 const VMReg iarg_reg[8] = { 747 R3->as_VMReg(), 748 R4->as_VMReg(), 749 R5->as_VMReg(), 750 R6->as_VMReg(), 751 R7->as_VMReg(), 752 R8->as_VMReg(), 753 R9->as_VMReg(), 754 R10->as_VMReg() 755 }; 756 757 const VMReg farg_reg[13] = { 758 F1->as_VMReg(), 759 F2->as_VMReg(), 760 F3->as_VMReg(), 761 F4->as_VMReg(), 762 F5->as_VMReg(), 763 F6->as_VMReg(), 764 F7->as_VMReg(), 765 F8->as_VMReg(), 766 F9->as_VMReg(), 767 F10->as_VMReg(), 768 F11->as_VMReg(), 769 F12->as_VMReg(), 770 F13->as_VMReg() 771 }; 772 773 // Check calling conventions consistency. 774 assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c && 775 sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c, 776 "consistency"); 777 778 const int additional_frame_header_slots = ((frame::native_abi_minframe_size - frame::jit_out_preserve_size) 779 / VMRegImpl::stack_slot_size); 780 const int float_offset_in_slots = Argument::float_on_stack_offset_in_bytes_c / VMRegImpl::stack_slot_size; 781 782 VMReg reg; 783 int arg = 0; 784 int freg = 0; 785 bool stack_used = false; 786 787 for (int i = 0; i < total_args_passed; ++i, ++arg) { 788 // Each argument corresponds to a slot in the Parameter Save Area (if not omitted) 789 int stk = (arg * 2) + additional_frame_header_slots; 790 791 switch(sig_bt[i]) { 792 // 793 // If arguments 0-7 are integers, they are passed in integer registers. 794 // Argument i is placed in iarg_reg[i]. 795 // 796 case T_BOOLEAN: 797 case T_CHAR: 798 case T_BYTE: 799 case T_SHORT: 800 case T_INT: 801 // We must cast ints to longs and use full 64 bit stack slots 802 // here. Thus fall through, handle as long. 803 case T_LONG: 804 case T_OBJECT: 805 case T_ARRAY: 806 case T_ADDRESS: 807 case T_METADATA: 808 // Oops are already boxed if required (JNI). 809 if (arg < Argument::n_int_register_parameters_c) { 810 reg = iarg_reg[arg]; 811 } else { 812 reg = VMRegImpl::stack2reg(stk); 813 stack_used = true; 814 } 815 regs[i].set2(reg); 816 break; 817 818 // 819 // Floats are treated differently from int regs: The first 13 float arguments 820 // are passed in registers (not the float args among the first 13 args). 821 // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed 822 // in farg_reg[j] if argument i is the j-th float argument of this call. 823 // 824 case T_FLOAT: 825 if (freg < Argument::n_float_register_parameters_c) { 826 // Put float in register ... 827 reg = farg_reg[freg]; 828 ++freg; 829 } else { 830 // Put float on stack. 
831 reg = VMRegImpl::stack2reg(stk + float_offset_in_slots); 832 stack_used = true; 833 } 834 regs[i].set1(reg); 835 break; 836 case T_DOUBLE: 837 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); 838 if (freg < Argument::n_float_register_parameters_c) { 839 // Put double in register ... 840 reg = farg_reg[freg]; 841 ++freg; 842 } else { 843 // Put double on stack. 844 reg = VMRegImpl::stack2reg(stk); 845 stack_used = true; 846 } 847 regs[i].set2(reg); 848 break; 849 850 case T_VOID: 851 // Do not count halves. 852 regs[i].set_bad(); 853 --arg; 854 break; 855 default: 856 ShouldNotReachHere(); 857 } 858 } 859 860 // Return size of the stack frame excluding the jit_out_preserve part in single-word slots. 861 #if defined(ABI_ELFv2) 862 assert(additional_frame_header_slots == 0, "ABIv2 shouldn't use extra slots"); 863 // ABIv2 allows omitting the Parameter Save Area if the callee's prototype 864 // indicates that all parameters can be passed in registers. 865 return stack_used ? (arg * 2) : 0; 866 #else 867 // The Parameter Save Area needs to be at least 8 double-word slots for ABIv1. 868 // We have to add extra slots because ABIv1 uses a larger header. 869 return MAX2(arg, 8) * 2 + additional_frame_header_slots; 870 #endif 871 } 872 #endif // COMPILER2 873 874 int SharedRuntime::vector_calling_convention(VMRegPair *regs, 875 uint num_bits, 876 uint total_args_passed) { 877 Unimplemented(); 878 return 0; 879 } 880 881 static address gen_c2i_adapter(MacroAssembler *masm, 882 int total_args_passed, 883 int comp_args_on_stack, 884 const BasicType *sig_bt, 885 const VMRegPair *regs, 886 Label& call_interpreter, 887 const Register& ientry) { 888 889 address c2i_entrypoint; 890 891 const Register sender_SP = R21_sender_SP; // == R21_tmp1 892 const Register code = R22_tmp2; 893 //const Register ientry = R23_tmp3; 894 const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 }; 895 const int num_value_regs = sizeof(value_regs) / sizeof(Register); 896 int value_regs_index = 0; 897 898 const Register return_pc = R27_tmp7; 899 const Register tmp = R28_tmp8; 900 901 assert_different_registers(sender_SP, code, ientry, return_pc, tmp); 902 903 // Adapter needs TOP_IJAVA_FRAME_ABI. 904 const int adapter_size = frame::top_ijava_frame_abi_size + 905 align_up(total_args_passed * wordSize, frame::alignment_in_bytes); 906 907 // regular (verified) c2i entry point 908 c2i_entrypoint = __ pc(); 909 910 // Does compiled code exists? If yes, patch the caller's callsite. 911 __ ld(code, method_(code)); 912 __ cmpdi(CCR0, code, 0); 913 __ ld(ientry, method_(interpreter_entry)); // preloaded 914 __ beq(CCR0, call_interpreter); 915 916 917 // Patch caller's callsite, method_(code) was not null which means that 918 // compiled code exists. 919 __ mflr(return_pc); 920 __ std(return_pc, _abi0(lr), R1_SP); 921 RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs); 922 923 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc); 924 925 RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs); 926 __ ld(return_pc, _abi0(lr), R1_SP); 927 __ ld(ientry, method_(interpreter_entry)); // preloaded 928 __ mtlr(return_pc); 929 930 931 // Call the interpreter. 932 __ BIND(call_interpreter); 933 __ mtctr(ientry); 934 935 // Get a copy of the current SP for loading caller's arguments. 936 __ mr(sender_SP, R1_SP); 937 938 // Add space for the adapter. 
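  // The interpreter expects each Java argument in a full 64-bit stack slot.
  // The loop below writes the outgoing arguments top-down, starting at
  // st_off = adapter_size - wordSize and moving towards lower offsets; longs
  // and doubles occupy two slots (the unused half is zapped with 0 in debug
  // builds). R15_esp is derived from the final st_off afterwards ("load TOS").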
939 __ resize_frame(-adapter_size, R12_scratch2); 940 941 int st_off = adapter_size - wordSize; 942 943 // Write the args into the outgoing interpreter space. 944 for (int i = 0; i < total_args_passed; i++) { 945 VMReg r_1 = regs[i].first(); 946 VMReg r_2 = regs[i].second(); 947 if (!r_1->is_valid()) { 948 assert(!r_2->is_valid(), ""); 949 continue; 950 } 951 if (r_1->is_stack()) { 952 Register tmp_reg = value_regs[value_regs_index]; 953 value_regs_index = (value_regs_index + 1) % num_value_regs; 954 // The calling convention produces OptoRegs that ignore the out 955 // preserve area (JIT's ABI). We must account for it here. 956 int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 957 if (!r_2->is_valid()) { 958 __ lwz(tmp_reg, ld_off, sender_SP); 959 } else { 960 __ ld(tmp_reg, ld_off, sender_SP); 961 } 962 // Pretend stack targets were loaded into tmp_reg. 963 r_1 = tmp_reg->as_VMReg(); 964 } 965 966 if (r_1->is_Register()) { 967 Register r = r_1->as_Register(); 968 if (!r_2->is_valid()) { 969 __ stw(r, st_off, R1_SP); 970 st_off-=wordSize; 971 } else { 972 // Longs are given 2 64-bit slots in the interpreter, but the 973 // data is passed in only 1 slot. 974 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { 975 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); ) 976 st_off-=wordSize; 977 } 978 __ std(r, st_off, R1_SP); 979 st_off-=wordSize; 980 } 981 } else { 982 assert(r_1->is_FloatRegister(), ""); 983 FloatRegister f = r_1->as_FloatRegister(); 984 if (!r_2->is_valid()) { 985 __ stfs(f, st_off, R1_SP); 986 st_off-=wordSize; 987 } else { 988 // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the 989 // data is passed in only 1 slot. 990 // One of these should get known junk... 991 DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); ) 992 st_off-=wordSize; 993 __ stfd(f, st_off, R1_SP); 994 st_off-=wordSize; 995 } 996 } 997 } 998 999 // Jump to the interpreter just as if interpreter was doing it. 1000 1001 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 1002 1003 // load TOS 1004 __ addi(R15_esp, R1_SP, st_off); 1005 1006 // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1. 1007 assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register"); 1008 __ bctr(); 1009 1010 return c2i_entrypoint; 1011 } 1012 1013 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, 1014 int total_args_passed, 1015 int comp_args_on_stack, 1016 const BasicType *sig_bt, 1017 const VMRegPair *regs) { 1018 1019 // Load method's entry-point from method. 1020 __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method); 1021 __ mtctr(R12_scratch2); 1022 1023 // We will only enter here from an interpreted frame and never from after 1024 // passing thru a c2i. Azul allowed this but we do not. If we lose the 1025 // race and use a c2i we will remain interpreted for the race loser(s). 1026 // This removes all sorts of headaches on the x86 side and also eliminates 1027 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions. 1028 1029 // Note: r13 contains the senderSP on entry. We must preserve it since 1030 // we may do a i2c -> c2i transition if we lose a race where compiled 1031 // code goes non-entrant while we get args ready. 
1032 // In addition we use r13 to locate all the interpreter args as 1033 // we must align the stack to 16 bytes on an i2c entry else we 1034 // lose alignment we expect in all compiled code and register 1035 // save code can segv when fxsave instructions find improperly 1036 // aligned stack pointer. 1037 1038 const Register ld_ptr = R15_esp; 1039 const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 }; 1040 const int num_value_regs = sizeof(value_regs) / sizeof(Register); 1041 int value_regs_index = 0; 1042 1043 int ld_offset = total_args_passed*wordSize; 1044 1045 // Cut-out for having no stack args. Since up to 2 int/oop args are passed 1046 // in registers, we will occasionally have no stack args. 1047 int comp_words_on_stack = 0; 1048 if (comp_args_on_stack) { 1049 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in 1050 // registers are below. By subtracting stack0, we either get a negative 1051 // number (all values in registers) or the maximum stack slot accessed. 1052 1053 // Convert 4-byte c2 stack slots to words. 1054 comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord; 1055 // Round up to minimum stack alignment, in wordSize. 1056 comp_words_on_stack = align_up(comp_words_on_stack, 2); 1057 __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1); 1058 } 1059 1060 // Now generate the shuffle code. Pick up all register args and move the 1061 // rest through one of the value_regs scratch registers. 1062 BLOCK_COMMENT("Shuffle arguments"); 1063 for (int i = 0; i < total_args_passed; i++) { 1064 if (sig_bt[i] == T_VOID) { 1065 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); 1066 continue; 1067 } 1068 1069 // Pick up 0, 1 or 2 words from ld_ptr. 1070 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), 1071 "scrambled load targets?"); 1072 VMReg r_1 = regs[i].first(); 1073 VMReg r_2 = regs[i].second(); 1074 if (!r_1->is_valid()) { 1075 assert(!r_2->is_valid(), ""); 1076 continue; 1077 } 1078 if (r_1->is_FloatRegister()) { 1079 if (!r_2->is_valid()) { 1080 __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr); 1081 ld_offset-=wordSize; 1082 } else { 1083 // Skip the unused interpreter slot. 1084 __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr); 1085 ld_offset-=2*wordSize; 1086 } 1087 } else { 1088 Register r; 1089 if (r_1->is_stack()) { 1090 // Must do a memory to memory move thru "value". 1091 r = value_regs[value_regs_index]; 1092 value_regs_index = (value_regs_index + 1) % num_value_regs; 1093 } else { 1094 r = r_1->as_Register(); 1095 } 1096 if (!r_2->is_valid()) { 1097 // Not sure we need to do this but it shouldn't hurt. 1098 if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) { 1099 __ ld(r, ld_offset, ld_ptr); 1100 ld_offset-=wordSize; 1101 } else { 1102 __ lwz(r, ld_offset, ld_ptr); 1103 ld_offset-=wordSize; 1104 } 1105 } else { 1106 // In 64bit, longs are given 2 64-bit slots in the interpreter, but the 1107 // data is passed in only 1 slot.
1108 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { 1109 ld_offset-=wordSize; 1110 } 1111 __ ld(r, ld_offset, ld_ptr); 1112 ld_offset-=wordSize; 1113 } 1114 1115 if (r_1->is_stack()) { 1116 // Now store value where the compiler expects it 1117 int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size; 1118 1119 if (sig_bt[i] == T_INT || sig_bt[i] == T_FLOAT ||sig_bt[i] == T_BOOLEAN || 1120 sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR || sig_bt[i] == T_BYTE) { 1121 __ stw(r, st_off, R1_SP); 1122 } else { 1123 __ std(r, st_off, R1_SP); 1124 } 1125 } 1126 } 1127 } 1128 1129 __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about 1130 1131 BLOCK_COMMENT("Store method"); 1132 // Store method into thread->callee_target. 1133 // We might end up in handle_wrong_method if the callee is 1134 // deoptimized as we race thru here. If that happens we don't want 1135 // to take a safepoint because the caller frame will look 1136 // interpreted and arguments are now "compiled" so it is much better 1137 // to make this transition invisible to the stack walking 1138 // code. Unfortunately if we try and find the callee by normal means 1139 // a safepoint is possible. So we stash the desired callee in the 1140 // thread and the vm will find there should this case occur. 1141 __ std(R19_method, thread_(callee_target)); 1142 1143 // Jump to the compiled code just as if compiled code was doing it. 1144 __ bctr(); 1145 } 1146 1147 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm, 1148 int total_args_passed, 1149 int comp_args_on_stack, 1150 const BasicType *sig_bt, 1151 const VMRegPair *regs, 1152 AdapterFingerPrint* fingerprint) { 1153 address i2c_entry; 1154 address c2i_unverified_entry; 1155 address c2i_entry; 1156 1157 1158 // entry: i2c 1159 1160 __ align(CodeEntryAlignment); 1161 i2c_entry = __ pc(); 1162 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs); 1163 1164 1165 // entry: c2i unverified 1166 1167 __ align(CodeEntryAlignment); 1168 BLOCK_COMMENT("c2i unverified entry"); 1169 c2i_unverified_entry = __ pc(); 1170 1171 // inline_cache contains a CompiledICData 1172 const Register ic = R19_inline_cache_reg; 1173 const Register ic_klass = R11_scratch1; 1174 const Register receiver_klass = R12_scratch2; 1175 const Register code = R21_tmp1; 1176 const Register ientry = R23_tmp3; 1177 1178 assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry); 1179 assert(R11_scratch1 == R11, "need prologue scratch register"); 1180 1181 Label call_interpreter; 1182 1183 __ ic_check(4 /* end_alignment */); 1184 __ ld(R19_method, CompiledICData::speculated_method_offset(), ic); 1185 // Argument is valid and klass is as expected, continue. 1186 1187 __ ld(code, method_(code)); 1188 __ cmpdi(CCR0, code, 0); 1189 __ ld(ientry, method_(interpreter_entry)); // preloaded 1190 __ beq_predict_taken(CCR0, call_interpreter); 1191 1192 // Branch to ic_miss_stub. 
1193 __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type); 1194 1195 // entry: c2i 1196 1197 c2i_entry = __ pc(); 1198 1199 // Class initialization barrier for static methods 1200 address c2i_no_clinit_check_entry = nullptr; 1201 if (VM_Version::supports_fast_class_init_checks()) { 1202 Label L_skip_barrier; 1203 1204 { // Bypass the barrier for non-static methods 1205 __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method); 1206 __ andi_(R0, R0, JVM_ACC_STATIC); 1207 __ beq(CCR0, L_skip_barrier); // non-static 1208 } 1209 1210 Register klass = R11_scratch1; 1211 __ load_method_holder(klass, R19_method); 1212 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/); 1213 1214 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0); 1215 __ mtctr(klass); 1216 __ bctr(); 1217 1218 __ bind(L_skip_barrier); 1219 c2i_no_clinit_check_entry = __ pc(); 1220 } 1221 1222 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1223 bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code); 1224 1225 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry); 1226 1227 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, 1228 c2i_no_clinit_check_entry); 1229 } 1230 1231 // An oop arg. Must pass a handle not the oop itself. 1232 static void object_move(MacroAssembler* masm, 1233 int frame_size_in_slots, 1234 OopMap* oop_map, int oop_handle_offset, 1235 bool is_receiver, int* receiver_offset, 1236 VMRegPair src, VMRegPair dst, 1237 Register r_caller_sp, Register r_temp_1, Register r_temp_2) { 1238 assert(!is_receiver || (is_receiver && (*receiver_offset == -1)), 1239 "receiver has already been moved"); 1240 1241 // We must pass a handle. First figure out the location we use as a handle. 1242 1243 if (src.first()->is_stack()) { 1244 // stack to stack or reg 1245 1246 const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register(); 1247 Label skip; 1248 const int oop_slot_in_callers_frame = reg2slot(src.first()); 1249 1250 guarantee(!is_receiver, "expecting receiver in register"); 1251 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots)); 1252 1253 __ addi(r_handle, r_caller_sp, reg2offset(src.first())); 1254 __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp); 1255 __ cmpdi(CCR0, r_temp_2, 0); 1256 __ bne(CCR0, skip); 1257 // Use a null handle if oop is null. 1258 __ li(r_handle, 0); 1259 __ bind(skip); 1260 1261 if (dst.first()->is_stack()) { 1262 // stack to stack 1263 __ std(r_handle, reg2offset(dst.first()), R1_SP); 1264 } else { 1265 // stack to reg 1266 // Nothing to do, r_handle is already the dst register. 1267 } 1268 } else { 1269 // reg to stack or reg 1270 const Register r_oop = src.first()->as_Register(); 1271 const Register r_handle = dst.first()->is_stack() ? 
r_temp_1 : dst.first()->as_Register(); 1272 const int oop_slot = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word 1273 + oop_handle_offset; // in slots 1274 const int oop_offset = oop_slot * VMRegImpl::stack_slot_size; 1275 Label skip; 1276 1277 if (is_receiver) { 1278 *receiver_offset = oop_offset; 1279 } 1280 oop_map->set_oop(VMRegImpl::stack2reg(oop_slot)); 1281 1282 __ std( r_oop, oop_offset, R1_SP); 1283 __ addi(r_handle, R1_SP, oop_offset); 1284 1285 __ cmpdi(CCR0, r_oop, 0); 1286 __ bne(CCR0, skip); 1287 // Use a null handle if oop is null. 1288 __ li(r_handle, 0); 1289 __ bind(skip); 1290 1291 if (dst.first()->is_stack()) { 1292 // reg to stack 1293 __ std(r_handle, reg2offset(dst.first()), R1_SP); 1294 } else { 1295 // reg to reg 1296 // Nothing to do, r_handle is already the dst register. 1297 } 1298 } 1299 } 1300 1301 static void int_move(MacroAssembler*masm, 1302 VMRegPair src, VMRegPair dst, 1303 Register r_caller_sp, Register r_temp) { 1304 assert(src.first()->is_valid(), "incoming must be int"); 1305 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long"); 1306 1307 if (src.first()->is_stack()) { 1308 if (dst.first()->is_stack()) { 1309 // stack to stack 1310 __ lwa(r_temp, reg2offset(src.first()), r_caller_sp); 1311 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1312 } else { 1313 // stack to reg 1314 __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp); 1315 } 1316 } else if (dst.first()->is_stack()) { 1317 // reg to stack 1318 __ extsw(r_temp, src.first()->as_Register()); 1319 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1320 } else { 1321 // reg to reg 1322 __ extsw(dst.first()->as_Register(), src.first()->as_Register()); 1323 } 1324 } 1325 1326 static void long_move(MacroAssembler*masm, 1327 VMRegPair src, VMRegPair dst, 1328 Register r_caller_sp, Register r_temp) { 1329 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long"); 1330 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long"); 1331 1332 if (src.first()->is_stack()) { 1333 if (dst.first()->is_stack()) { 1334 // stack to stack 1335 __ ld( r_temp, reg2offset(src.first()), r_caller_sp); 1336 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1337 } else { 1338 // stack to reg 1339 __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp); 1340 } 1341 } else if (dst.first()->is_stack()) { 1342 // reg to stack 1343 __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP); 1344 } else { 1345 // reg to reg 1346 if (dst.first()->as_Register() != src.first()->as_Register()) 1347 __ mr(dst.first()->as_Register(), src.first()->as_Register()); 1348 } 1349 } 1350 1351 static void float_move(MacroAssembler*masm, 1352 VMRegPair src, VMRegPair dst, 1353 Register r_caller_sp, Register r_temp) { 1354 assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float"); 1355 assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float"); 1356 1357 if (src.first()->is_stack()) { 1358 if (dst.first()->is_stack()) { 1359 // stack to stack 1360 __ lwz(r_temp, reg2offset(src.first()), r_caller_sp); 1361 __ stw(r_temp, reg2offset(dst.first()), R1_SP); 1362 } else { 1363 // stack to reg 1364 __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp); 1365 } 1366 } else if (dst.first()->is_stack()) { 1367 // reg to stack 1368 __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), 
R1_SP); 1369 } else { 1370 // reg to reg 1371 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister()) 1372 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 1373 } 1374 } 1375 1376 static void double_move(MacroAssembler*masm, 1377 VMRegPair src, VMRegPair dst, 1378 Register r_caller_sp, Register r_temp) { 1379 assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double"); 1380 assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double"); 1381 1382 if (src.first()->is_stack()) { 1383 if (dst.first()->is_stack()) { 1384 // stack to stack 1385 __ ld( r_temp, reg2offset(src.first()), r_caller_sp); 1386 __ std(r_temp, reg2offset(dst.first()), R1_SP); 1387 } else { 1388 // stack to reg 1389 __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp); 1390 } 1391 } else if (dst.first()->is_stack()) { 1392 // reg to stack 1393 __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP); 1394 } else { 1395 // reg to reg 1396 if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister()) 1397 __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 1398 } 1399 } 1400 1401 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { 1402 switch (ret_type) { 1403 case T_BOOLEAN: 1404 case T_CHAR: 1405 case T_BYTE: 1406 case T_SHORT: 1407 case T_INT: 1408 __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1409 break; 1410 case T_ARRAY: 1411 case T_OBJECT: 1412 case T_LONG: 1413 __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1414 break; 1415 case T_FLOAT: 1416 __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1417 break; 1418 case T_DOUBLE: 1419 __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1420 break; 1421 case T_VOID: 1422 break; 1423 default: 1424 ShouldNotReachHere(); 1425 break; 1426 } 1427 } 1428 1429 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { 1430 switch (ret_type) { 1431 case T_BOOLEAN: 1432 case T_CHAR: 1433 case T_BYTE: 1434 case T_SHORT: 1435 case T_INT: 1436 __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1437 break; 1438 case T_ARRAY: 1439 case T_OBJECT: 1440 case T_LONG: 1441 __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1442 break; 1443 case T_FLOAT: 1444 __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1445 break; 1446 case T_DOUBLE: 1447 __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); 1448 break; 1449 case T_VOID: 1450 break; 1451 default: 1452 ShouldNotReachHere(); 1453 break; 1454 } 1455 } 1456 1457 static void verify_oop_args(MacroAssembler* masm, 1458 const methodHandle& method, 1459 const BasicType* sig_bt, 1460 const VMRegPair* regs) { 1461 Register temp_reg = R19_method; // not part of any compiled calling seq 1462 if (VerifyOops) { 1463 for (int i = 0; i < method->size_of_parameters(); i++) { 1464 if (is_reference_type(sig_bt[i])) { 1465 VMReg r = regs[i].first(); 1466 assert(r->is_valid(), "bad oop arg"); 1467 if (r->is_stack()) { 1468 __ ld(temp_reg, reg2offset(r), R1_SP); 1469 __ verify_oop(temp_reg, FILE_AND_LINE); 1470 } else { 1471 __ verify_oop(r->as_Register(), FILE_AND_LINE); 1472 } 1473 } 1474 } 1475 } 1476 } 1477 1478 static void gen_special_dispatch(MacroAssembler* masm, 1479 const methodHandle& method, 1480 const BasicType* sig_bt, 1481 const VMRegPair* regs) { 1482 
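  // Direct dispatch for signature-polymorphic intrinsics (invokeBasic, the
  // linkTo* variants and linkToNative): no adapter frame is built here. The
  // trailing MemberName/NativeEntryPoint argument, if present, is loaded into
  // R19_method, the receiver (if any) must already be in a register, and
  // control is handed to MethodHandles::generate_method_handle_dispatch().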
verify_oop_args(masm, method, sig_bt, regs); 1483 vmIntrinsics::ID iid = method->intrinsic_id(); 1484 1485 // Now write the args into the outgoing interpreter space 1486 bool has_receiver = false; 1487 Register receiver_reg = noreg; 1488 int member_arg_pos = -1; 1489 Register member_reg = noreg; 1490 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); 1491 if (ref_kind != 0) { 1492 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument 1493 member_reg = R19_method; // known to be free at this point 1494 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); 1495 } else if (iid == vmIntrinsics::_invokeBasic) { 1496 has_receiver = true; 1497 } else if (iid == vmIntrinsics::_linkToNative) { 1498 member_arg_pos = method->size_of_parameters() - 1; // trailing NativeEntryPoint argument 1499 member_reg = R19_method; // known to be free at this point 1500 } else { 1501 fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid)); 1502 } 1503 1504 if (member_reg != noreg) { 1505 // Load the member_arg into register, if necessary. 1506 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs); 1507 VMReg r = regs[member_arg_pos].first(); 1508 if (r->is_stack()) { 1509 __ ld(member_reg, reg2offset(r), R1_SP); 1510 } else { 1511 // no data motion is needed 1512 member_reg = r->as_Register(); 1513 } 1514 } 1515 1516 if (has_receiver) { 1517 // Make sure the receiver is loaded into a register. 1518 assert(method->size_of_parameters() > 0, "oob"); 1519 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); 1520 VMReg r = regs[0].first(); 1521 assert(r->is_valid(), "bad receiver arg"); 1522 if (r->is_stack()) { 1523 // Porting note: This assumes that compiled calling conventions always 1524 // pass the receiver oop in a register. If this is not true on some 1525 // platform, pick a temp and load the receiver from stack. 1526 fatal("receiver always in a register"); 1527 receiver_reg = R11_scratch1; // TODO (hs24): is R11_scratch1 really free at this point? 1528 __ ld(receiver_reg, reg2offset(r), R1_SP); 1529 } else { 1530 // no data motion is needed 1531 receiver_reg = r->as_Register(); 1532 } 1533 } 1534 1535 // Figure out which address we are really jumping to: 1536 MethodHandles::generate_method_handle_dispatch(masm, iid, 1537 receiver_reg, member_reg, /*for_compiler_entry:*/ true); 1538 } 1539 1540 //---------------------------- continuation_enter_setup --------------------------- 1541 // 1542 // Frame setup. 1543 // 1544 // Arguments: 1545 // None. 1546 // 1547 // Results: 1548 // R1_SP: pointer to blank ContinuationEntry in the pushed frame. 
1549 // 1550 // Kills: 1551 // R0, R20 1552 // 1553 static OopMap* continuation_enter_setup(MacroAssembler* masm, int& framesize_words) { 1554 assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, ""); 1555 assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, ""); 1556 assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, ""); 1557 1558 const int frame_size_in_bytes = (int)ContinuationEntry::size(); 1559 assert(is_aligned(frame_size_in_bytes, frame::alignment_in_bytes), "alignment error"); 1560 1561 framesize_words = frame_size_in_bytes / wordSize; 1562 1563 DEBUG_ONLY(__ block_comment("setup {")); 1564 // Save return pc and push entry frame 1565 const Register return_pc = R20; 1566 __ mflr(return_pc); 1567 __ std(return_pc, _abi0(lr), R1_SP); // SP->lr = return_pc 1568 __ push_frame(frame_size_in_bytes , R0); // SP -= frame_size_in_bytes 1569 1570 OopMap* map = new OopMap((int)frame_size_in_bytes / VMRegImpl::stack_slot_size, 0 /* arg_slots*/); 1571 1572 __ ld_ptr(R0, JavaThread::cont_entry_offset(), R16_thread); 1573 __ st_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread); 1574 __ st_ptr(R0, ContinuationEntry::parent_offset(), R1_SP); 1575 DEBUG_ONLY(__ block_comment("} setup")); 1576 1577 return map; 1578 } 1579 1580 //---------------------------- fill_continuation_entry --------------------------- 1581 // 1582 // Initialize the new ContinuationEntry. 1583 // 1584 // Arguments: 1585 // R1_SP: pointer to blank Continuation entry 1586 // reg_cont_obj: pointer to the continuation 1587 // reg_flags: flags 1588 // 1589 // Results: 1590 // R1_SP: pointer to filled out ContinuationEntry 1591 // 1592 // Kills: 1593 // R8_ARG6, R9_ARG7, R10_ARG8 1594 // 1595 static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, Register reg_flags) { 1596 assert_different_registers(reg_cont_obj, reg_flags); 1597 Register zero = R8_ARG6; 1598 Register tmp2 = R9_ARG7; 1599 Register tmp3 = R10_ARG8; 1600 1601 DEBUG_ONLY(__ block_comment("fill {")); 1602 #ifdef ASSERT 1603 __ load_const_optimized(tmp2, ContinuationEntry::cookie_value()); 1604 __ stw(tmp2, in_bytes(ContinuationEntry::cookie_offset()), R1_SP); 1605 #endif //ASSERT 1606 1607 __ li(zero, 0); 1608 __ st_ptr(reg_cont_obj, ContinuationEntry::cont_offset(), R1_SP); 1609 __ stw(reg_flags, in_bytes(ContinuationEntry::flags_offset()), R1_SP); 1610 __ st_ptr(zero, ContinuationEntry::chunk_offset(), R1_SP); 1611 __ stw(zero, in_bytes(ContinuationEntry::argsize_offset()), R1_SP); 1612 __ stw(zero, in_bytes(ContinuationEntry::pin_count_offset()), R1_SP); 1613 1614 __ ld_ptr(tmp2, JavaThread::cont_fastpath_offset(), R16_thread); 1615 __ ld(tmp3, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread); 1616 __ st_ptr(tmp2, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP); 1617 __ std(tmp3, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP); 1618 1619 __ st_ptr(zero, JavaThread::cont_fastpath_offset(), R16_thread); 1620 __ std(zero, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread); 1621 DEBUG_ONLY(__ block_comment("} fill")); 1622 } 1623 1624 //---------------------------- continuation_enter_cleanup --------------------------- 1625 // 1626 // Copy corresponding attributes from the top ContinuationEntry to the JavaThread 1627 // before deleting it. 1628 // 1629 // Arguments: 1630 // R1_SP: pointer to the ContinuationEntry 1631 // 1632 // Results: 1633 // None. 
//
// Kills:
//   R8_ARG6, R9_ARG7, R10_ARG8, R15_esp
//
static void continuation_enter_cleanup(MacroAssembler* masm) {
  Register tmp1 = R8_ARG6;
  Register tmp2 = R9_ARG7;
  Register tmp3 = R10_ARG8;

#ifdef ASSERT
  __ block_comment("clean {");
  __ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread);
  __ cmpd(CCR0, R1_SP, tmp1);
  __ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP");
#endif

  __ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
  __ st_ptr(tmp1, JavaThread::cont_fastpath_offset(), R16_thread);

  if (CheckJNICalls) {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
    __ cmpwi(CCR0, R0, 0);
    __ beq(CCR0, L_skip_vthread_code);

    // If the held monitor count is > 0 and this vthread is terminating then
    // it failed to release a JNI monitor. So we issue the same log message
    // that JavaThread::exit does.
    __ ld(R0, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
    __ cmpdi(CCR0, R0, 0);
    __ beq(CCR0, L_skip_vthread_code);

    // Save return value potentially containing the exception oop
    Register ex_oop = R15_esp;   // nonvolatile register
    __ mr(ex_oop, R3_RET);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
    // Restore potential return value
    __ mr(R3_RET, ex_oop);

    // For vthreads we have to explicitly zero the JNI monitor count of the carrier
    // on termination. The held count is implicitly zeroed below when we restore from
    // the parent held count (which has to be zero).
    __ li(tmp1, 0);
    __ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);

    __ bind(L_skip_vthread_code);
  }
#ifdef ASSERT
  else {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
    __ cmpwi(CCR0, R0, 0);
    __ beq(CCR0, L_skip_vthread_code);

    // See comment just above. If not checking JNI calls, the JNI count is only
    // needed for assertion checking.
1692 __ li(tmp1, 0); 1693 __ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread); 1694 1695 __ bind(L_skip_vthread_code); 1696 } 1697 #endif 1698 1699 __ ld(tmp2, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP); 1700 __ ld_ptr(tmp3, ContinuationEntry::parent_offset(), R1_SP); 1701 __ std(tmp2, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread); 1702 __ st_ptr(tmp3, JavaThread::cont_entry_offset(), R16_thread); 1703 DEBUG_ONLY(__ block_comment("} clean")); 1704 } 1705 1706 static void check_continuation_enter_argument(VMReg actual_vmreg, 1707 Register expected_reg, 1708 const char* name) { 1709 assert(!actual_vmreg->is_stack(), "%s cannot be on stack", name); 1710 assert(actual_vmreg->as_Register() == expected_reg, 1711 "%s is in unexpected register: %s instead of %s", 1712 name, actual_vmreg->as_Register()->name(), expected_reg->name()); 1713 } 1714 1715 static void gen_continuation_enter(MacroAssembler* masm, 1716 const VMRegPair* regs, 1717 int& exception_offset, 1718 OopMapSet* oop_maps, 1719 int& frame_complete, 1720 int& framesize_words, 1721 int& interpreted_entry_offset, 1722 int& compiled_entry_offset) { 1723 1724 // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread) 1725 int pos_cont_obj = 0; 1726 int pos_is_cont = 1; 1727 int pos_is_virtual = 2; 1728 1729 // The platform-specific calling convention may present the arguments in various registers. 1730 // To simplify the rest of the code, we expect the arguments to reside at these known 1731 // registers, and we additionally check the placement here in case calling convention ever 1732 // changes. 1733 Register reg_cont_obj = R3_ARG1; 1734 Register reg_is_cont = R4_ARG2; 1735 Register reg_is_virtual = R5_ARG3; 1736 1737 check_continuation_enter_argument(regs[pos_cont_obj].first(), reg_cont_obj, "Continuation object"); 1738 check_continuation_enter_argument(regs[pos_is_cont].first(), reg_is_cont, "isContinue"); 1739 check_continuation_enter_argument(regs[pos_is_virtual].first(), reg_is_virtual, "isVirtualThread"); 1740 1741 address resolve_static_call = SharedRuntime::get_resolve_static_call_stub(); 1742 1743 address start = __ pc(); 1744 1745 Label L_thaw, L_exit; 1746 1747 // i2i entry used at interp_only_mode only 1748 interpreted_entry_offset = __ pc() - start; 1749 { 1750 #ifdef ASSERT 1751 Label is_interp_only; 1752 __ lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread); 1753 __ cmpwi(CCR0, R0, 0); 1754 __ bne(CCR0, is_interp_only); 1755 __ stop("enterSpecial interpreter entry called when not in interp_only_mode"); 1756 __ bind(is_interp_only); 1757 #endif 1758 1759 // Read interpreter arguments into registers (this is an ad-hoc i2c adapter) 1760 __ ld(reg_cont_obj, Interpreter::stackElementSize*3, R15_esp); 1761 __ lwz(reg_is_cont, Interpreter::stackElementSize*2, R15_esp); 1762 __ lwz(reg_is_virtual, Interpreter::stackElementSize*1, R15_esp); 1763 1764 __ push_cont_fastpath(); 1765 1766 OopMap* map = continuation_enter_setup(masm, framesize_words); 1767 1768 // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe, 1769 // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway. 1770 1771 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual); 1772 1773 // If isContinue, call to thaw. 
Otherwise, call Continuation.enter(Continuation c, boolean isContinue) 1774 __ cmpwi(CCR0, reg_is_cont, 0); 1775 __ bne(CCR0, L_thaw); 1776 1777 // --- call Continuation.enter(Continuation c, boolean isContinue) 1778 1779 // Emit compiled static call. The call will be always resolved to the c2i 1780 // entry of Continuation.enter(Continuation c, boolean isContinue). 1781 // There are special cases in SharedRuntime::resolve_static_call_C() and 1782 // SharedRuntime::resolve_sub_helper_internal() to achieve this 1783 // See also corresponding call below. 1784 address c2i_call_pc = __ pc(); 1785 int start_offset = __ offset(); 1786 // Put the entry point as a constant into the constant pool. 1787 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none); 1788 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr); 1789 guarantee(entry_point_toc_addr != nullptr, "const section overflow"); 1790 1791 // Emit the trampoline stub which will be related to the branch-and-link below. 1792 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset); 1793 guarantee(stub != nullptr, "no space for trampoline stub"); 1794 1795 __ relocate(relocInfo::static_call_type); 1796 // Note: At this point we do not have the address of the trampoline 1797 // stub, and the entry point might be too far away for bl, so __ pc() 1798 // serves as dummy and the bl will be patched later. 1799 __ bl(__ pc()); 1800 oop_maps->add_gc_map(__ pc() - start, map); 1801 __ post_call_nop(); 1802 1803 __ b(L_exit); 1804 1805 // static stub for the call above 1806 stub = CompiledDirectCall::emit_to_interp_stub(masm, c2i_call_pc); 1807 guarantee(stub != nullptr, "no space for static stub"); 1808 } 1809 1810 // compiled entry 1811 __ align(CodeEntryAlignment); 1812 compiled_entry_offset = __ pc() - start; 1813 1814 OopMap* map = continuation_enter_setup(masm, framesize_words); 1815 1816 // Frame is now completed as far as size and linkage. 1817 frame_complete =__ pc() - start; 1818 1819 fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual); 1820 1821 // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue) 1822 __ cmpwi(CCR0, reg_is_cont, 0); 1823 __ bne(CCR0, L_thaw); 1824 1825 // --- call Continuation.enter(Continuation c, boolean isContinue) 1826 1827 // Emit compiled static call 1828 // The call needs to be resolved. There's a special case for this in 1829 // SharedRuntime::find_callee_info_helper() which calls 1830 // LinkResolver::resolve_continuation_enter() which resolves the call to 1831 // Continuation.enter(Continuation c, boolean isContinue). 1832 address call_pc = __ pc(); 1833 int start_offset = __ offset(); 1834 // Put the entry point as a constant into the constant pool. 1835 const address entry_point_toc_addr = __ address_constant(resolve_static_call, RelocationHolder::none); 1836 const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr); 1837 guarantee(entry_point_toc_addr != nullptr, "const section overflow"); 1838 1839 // Emit the trampoline stub which will be related to the branch-and-link below. 
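  // (The trampoline is needed when the call target is out of reach of a direct bl
  // (+/- 32 MB on PPC64); it loads the real destination from the TOC slot created above.)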
1840 address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset); 1841 guarantee(stub != nullptr, "no space for trampoline stub"); 1842 1843 __ relocate(relocInfo::static_call_type); 1844 // Note: At this point we do not have the address of the trampoline 1845 // stub, and the entry point might be too far away for bl, so __ pc() 1846 // serves as dummy and the bl will be patched later. 1847 __ bl(__ pc()); 1848 oop_maps->add_gc_map(__ pc() - start, map); 1849 __ post_call_nop(); 1850 1851 __ b(L_exit); 1852 1853 // --- Thawing path 1854 1855 __ bind(L_thaw); 1856 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(StubRoutines::cont_thaw())); 1857 __ mtctr(R0); 1858 __ bctrl(); 1859 oop_maps->add_gc_map(__ pc() - start, map->deep_copy()); 1860 ContinuationEntry::_return_pc_offset = __ pc() - start; 1861 __ post_call_nop(); 1862 1863 // --- Normal exit (resolve/thawing) 1864 1865 __ bind(L_exit); 1866 continuation_enter_cleanup(masm); 1867 1868 // Pop frame and return 1869 DEBUG_ONLY(__ ld_ptr(R0, 0, R1_SP)); 1870 __ addi(R1_SP, R1_SP, framesize_words*wordSize); 1871 DEBUG_ONLY(__ cmpd(CCR0, R0, R1_SP)); 1872 __ asm_assert_eq(FILE_AND_LINE ": inconsistent frame size"); 1873 __ ld(R0, _abi0(lr), R1_SP); // Return pc 1874 __ mtlr(R0); 1875 __ blr(); 1876 1877 // --- Exception handling path 1878 1879 exception_offset = __ pc() - start; 1880 1881 continuation_enter_cleanup(masm); 1882 Register ex_pc = R17_tos; // nonvolatile register 1883 Register ex_oop = R15_esp; // nonvolatile register 1884 __ ld(ex_pc, _abi0(callers_sp), R1_SP); // Load caller's return pc 1885 __ ld(ex_pc, _abi0(lr), ex_pc); 1886 __ mr(ex_oop, R3_RET); // save return value containing the exception oop 1887 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, ex_pc); 1888 __ mtlr(R3_RET); // the exception handler 1889 __ ld(R1_SP, _abi0(callers_sp), R1_SP); // remove enterSpecial frame 1890 1891 // Continue at exception handler 1892 // See OptoRuntime::generate_exception_blob for register arguments 1893 __ mr(R3_ARG1, ex_oop); // pass exception oop 1894 __ mr(R4_ARG2, ex_pc); // pass exception pc 1895 __ blr(); 1896 1897 // static stub for the call above 1898 stub = CompiledDirectCall::emit_to_interp_stub(masm, call_pc); 1899 guarantee(stub != nullptr, "no space for static stub"); 1900 } 1901 1902 static void gen_continuation_yield(MacroAssembler* masm, 1903 const VMRegPair* regs, 1904 OopMapSet* oop_maps, 1905 int& frame_complete, 1906 int& framesize_words, 1907 int& compiled_entry_offset) { 1908 Register tmp = R10_ARG8; 1909 1910 const int framesize_bytes = (int)align_up((int)frame::native_abi_reg_args_size, frame::alignment_in_bytes); 1911 framesize_words = framesize_bytes / wordSize; 1912 1913 address start = __ pc(); 1914 compiled_entry_offset = __ pc() - start; 1915 1916 // Save return pc and push entry frame 1917 __ mflr(tmp); 1918 __ std(tmp, _abi0(lr), R1_SP); // SP->lr = return_pc 1919 __ push_frame(framesize_bytes , R0); // SP -= frame_size_in_bytes 1920 1921 DEBUG_ONLY(__ block_comment("Frame Complete")); 1922 frame_complete = __ pc() - start; 1923 address last_java_pc = __ pc(); 1924 1925 // This nop must be exactly at the PC we push into the frame info. 1926 // We use this nop for fast CodeBlob lookup, associate the OopMap 1927 // with it right away. 
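  // Note: last_java_pc captured above is exactly the address of this nop; it is
  // installed as the frame's pc via set_last_Java_frame below.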
  __ post_call_nop();
  OopMap* map = new OopMap(framesize_bytes / VMRegImpl::stack_slot_size, 1);
  oop_maps->add_gc_map(last_java_pc - start, map);

  __ calculate_address_from_global_toc(tmp, last_java_pc); // will be relocated
  __ set_last_Java_frame(R1_SP, tmp);
  __ call_VM_leaf(Continuation::freeze_entry(), R16_thread, R1_SP);
  __ reset_last_Java_frame();

  Label L_pinned;

  __ cmpwi(CCR0, R3_RET, 0);
  __ bne(CCR0, L_pinned);

  // yield succeeded

  // Pop frames of continuation including this stub's frame
  __ ld_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
  // The frame pushed by gen_continuation_enter is on top now again
  continuation_enter_cleanup(masm);

  // Pop frame and return
  Label L_return;
  __ bind(L_return);
  __ pop_frame();
  __ ld(R0, _abi0(lr), R1_SP); // Return pc
  __ mtlr(R0);
  __ blr();

  // yield failed - continuation is pinned

  __ bind(L_pinned);

  // handle pending exception thrown by freeze
  __ ld(tmp, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
  __ cmpdi(CCR0, tmp, 0);
  __ beq(CCR0, L_return); // return if no exception is pending
  __ pop_frame();
  __ ld(R0, _abi0(lr), R1_SP); // Return pc
  __ mtlr(R0);
  __ load_const_optimized(tmp, StubRoutines::forward_exception_entry(), R0);
  __ mtctr(tmp);
  __ bctr();
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee. Critical native functions leave the state
// _in_Java, since they cannot stop for GC.
// Some other parts of JNI setup are skipped, like the tear down of the JNI
// handle block and the check for pending exceptions, since it's impossible
// for them to be thrown.
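//
// Roughly, the code emitted below consists of (in order):
//   - an inline-cache check (unverified entry point) for non-static methods,
//   - the verified entry point with an optional class initialization barrier,
//   - a stack overflow check and the wrapper frame push,
//   - shuffling of the Java arguments into the C calling convention,
//     handlizing oops (and the class mirror for static methods),
//   - locking for synchronized methods,
//   - the transition to _thread_in_native and the actual JNI call,
//   - unpacking of the result, the transition back to _thread_in_Java
//     (blocking at a safepoint if required), unlocking, unhandlizing of an
//     oop result, and the return (or a jump to forward_exception_entry).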
1988 // 1989 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, 1990 const methodHandle& method, 1991 int compile_id, 1992 BasicType *in_sig_bt, 1993 VMRegPair *in_regs, 1994 BasicType ret_type) { 1995 if (method->is_continuation_native_intrinsic()) { 1996 int exception_offset = -1; 1997 OopMapSet* oop_maps = new OopMapSet(); 1998 int frame_complete = -1; 1999 int stack_slots = -1; 2000 int interpreted_entry_offset = -1; 2001 int vep_offset = -1; 2002 if (method->is_continuation_enter_intrinsic()) { 2003 gen_continuation_enter(masm, 2004 in_regs, 2005 exception_offset, 2006 oop_maps, 2007 frame_complete, 2008 stack_slots, 2009 interpreted_entry_offset, 2010 vep_offset); 2011 } else if (method->is_continuation_yield_intrinsic()) { 2012 gen_continuation_yield(masm, 2013 in_regs, 2014 oop_maps, 2015 frame_complete, 2016 stack_slots, 2017 vep_offset); 2018 } else { 2019 guarantee(false, "Unknown Continuation native intrinsic"); 2020 } 2021 2022 #ifdef ASSERT 2023 if (method->is_continuation_enter_intrinsic()) { 2024 assert(interpreted_entry_offset != -1, "Must be set"); 2025 assert(exception_offset != -1, "Must be set"); 2026 } else { 2027 assert(interpreted_entry_offset == -1, "Must be unset"); 2028 assert(exception_offset == -1, "Must be unset"); 2029 } 2030 assert(frame_complete != -1, "Must be set"); 2031 assert(stack_slots != -1, "Must be set"); 2032 assert(vep_offset != -1, "Must be set"); 2033 #endif 2034 2035 __ flush(); 2036 nmethod* nm = nmethod::new_native_nmethod(method, 2037 compile_id, 2038 masm->code(), 2039 vep_offset, 2040 frame_complete, 2041 stack_slots, 2042 in_ByteSize(-1), 2043 in_ByteSize(-1), 2044 oop_maps, 2045 exception_offset); 2046 if (nm == nullptr) return nm; 2047 if (method->is_continuation_enter_intrinsic()) { 2048 ContinuationEntry::set_enter_code(nm, interpreted_entry_offset); 2049 } else if (method->is_continuation_yield_intrinsic()) { 2050 _cont_doYield_stub = nm; 2051 } 2052 return nm; 2053 } 2054 2055 if (method->is_method_handle_intrinsic()) { 2056 vmIntrinsics::ID iid = method->intrinsic_id(); 2057 intptr_t start = (intptr_t)__ pc(); 2058 int vep_offset = ((intptr_t)__ pc()) - start; 2059 gen_special_dispatch(masm, 2060 method, 2061 in_sig_bt, 2062 in_regs); 2063 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period 2064 __ flush(); 2065 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually 2066 return nmethod::new_native_nmethod(method, 2067 compile_id, 2068 masm->code(), 2069 vep_offset, 2070 frame_complete, 2071 stack_slots / VMRegImpl::slots_per_word, 2072 in_ByteSize(-1), 2073 in_ByteSize(-1), 2074 (OopMapSet*)nullptr); 2075 } 2076 2077 address native_func = method->native_function(); 2078 assert(native_func != nullptr, "must have function"); 2079 2080 // First, create signature for outgoing C call 2081 // -------------------------------------------------------------------------- 2082 2083 int total_in_args = method->size_of_parameters(); 2084 // We have received a description of where all the java args are located 2085 // on entry to the wrapper. We need to convert these args to where 2086 // the jni function will expect them. To figure out where they go 2087 // we convert the java signature to a C signature by inserting 2088 // the hidden arguments as arg[0] and possibly arg[1] (static method) 2089 2090 // Calculate the total number of C arguments and create arrays for the 2091 // signature and the outgoing registers. 
2092 // On ppc64, we have two arrays for the outgoing registers, because 2093 // some floating-point arguments must be passed in registers _and_ 2094 // in stack locations. 2095 bool method_is_static = method->is_static(); 2096 int total_c_args = total_in_args + (method_is_static ? 2 : 1); 2097 2098 BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 2099 VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 2100 BasicType* in_elem_bt = nullptr; 2101 2102 // Create the signature for the C call: 2103 // 1) add the JNIEnv* 2104 // 2) add the class if the method is static 2105 // 3) copy the rest of the incoming signature (shifted by the number of 2106 // hidden arguments). 2107 2108 int argc = 0; 2109 out_sig_bt[argc++] = T_ADDRESS; 2110 if (method->is_static()) { 2111 out_sig_bt[argc++] = T_OBJECT; 2112 } 2113 2114 for (int i = 0; i < total_in_args ; i++ ) { 2115 out_sig_bt[argc++] = in_sig_bt[i]; 2116 } 2117 2118 2119 // Compute the wrapper's frame size. 2120 // -------------------------------------------------------------------------- 2121 2122 // Now figure out where the args must be stored and how much stack space 2123 // they require. 2124 // 2125 // Compute framesize for the wrapper. We need to handlize all oops in 2126 // incoming registers. 2127 // 2128 // Calculate the total number of stack slots we will need: 2129 // 1) abi requirements 2130 // 2) outgoing arguments 2131 // 3) space for inbound oop handle area 2132 // 4) space for handlizing a klass if static method 2133 // 5) space for a lock if synchronized method 2134 // 6) workspace for saving return values, int <-> float reg moves, etc. 2135 // 7) alignment 2136 // 2137 // Layout of the native wrapper frame: 2138 // (stack grows upwards, memory grows downwards) 2139 // 2140 // NW [ABI_REG_ARGS] <-- 1) R1_SP 2141 // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset 2142 // [oopHandle area] <-- 3) R1_SP + oop_handle_offset 2143 // klass <-- 4) R1_SP + klass_offset 2144 // lock <-- 5) R1_SP + lock_offset 2145 // [workspace] <-- 6) R1_SP + workspace_offset 2146 // [alignment] (optional) <-- 7) 2147 // caller [JIT_TOP_ABI_48] <-- r_callers_sp 2148 // 2149 // - *_slot_offset Indicates offset from SP in number of stack slots. 2150 // - *_offset Indicates offset from SP in bytes. 2151 2152 int stack_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args) + // 1+2) 2153 SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention. 2154 2155 // Now the space for the inbound oop handle area. 2156 int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word; 2157 2158 int oop_handle_slot_offset = stack_slots; 2159 stack_slots += total_save_slots; // 3) 2160 2161 int klass_slot_offset = 0; 2162 int klass_offset = -1; 2163 if (method_is_static) { // 4) 2164 klass_slot_offset = stack_slots; 2165 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 2166 stack_slots += VMRegImpl::slots_per_word; 2167 } 2168 2169 int lock_slot_offset = 0; 2170 int lock_offset = -1; 2171 if (method->is_synchronized()) { // 5) 2172 lock_slot_offset = stack_slots; 2173 lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size; 2174 stack_slots += VMRegImpl::slots_per_word; 2175 } 2176 2177 int workspace_slot_offset = stack_slots; // 6) 2178 stack_slots += 2; 2179 2180 // Now compute actual number of stack words we need. 2181 // Rounding to make stack properly aligned. 
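  // Note: align_up works in units of stack slots here, so the byte alignment is
  // divided by the slot size.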
2182 stack_slots = align_up(stack_slots, // 7) 2183 frame::alignment_in_bytes / VMRegImpl::stack_slot_size); 2184 int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size; 2185 2186 2187 // Now we can start generating code. 2188 // -------------------------------------------------------------------------- 2189 2190 intptr_t start_pc = (intptr_t)__ pc(); 2191 intptr_t vep_start_pc; 2192 intptr_t frame_done_pc; 2193 intptr_t oopmap_pc; 2194 2195 Label handle_pending_exception; 2196 2197 Register r_callers_sp = R21; 2198 Register r_temp_1 = R22; 2199 Register r_temp_2 = R23; 2200 Register r_temp_3 = R24; 2201 Register r_temp_4 = R25; 2202 Register r_temp_5 = R26; 2203 Register r_temp_6 = R27; 2204 Register r_return_pc = R28; 2205 2206 Register r_carg1_jnienv = noreg; 2207 Register r_carg2_classorobject = noreg; 2208 r_carg1_jnienv = out_regs[0].first()->as_Register(); 2209 r_carg2_classorobject = out_regs[1].first()->as_Register(); 2210 2211 2212 // Generate the Unverified Entry Point (UEP). 2213 // -------------------------------------------------------------------------- 2214 assert(start_pc == (intptr_t)__ pc(), "uep must be at start"); 2215 2216 // Check ic: object class == cached class? 2217 if (!method_is_static) { 2218 __ ic_check(4 /* end_alignment */); 2219 } 2220 2221 // Generate the Verified Entry Point (VEP). 2222 // -------------------------------------------------------------------------- 2223 vep_start_pc = (intptr_t)__ pc(); 2224 2225 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) { 2226 Label L_skip_barrier; 2227 Register klass = r_temp_1; 2228 // Notify OOP recorder (don't need the relocation) 2229 AddressLiteral md = __ constant_metadata_address(method->method_holder()); 2230 __ load_const_optimized(klass, md.value(), R0); 2231 __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/); 2232 2233 __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0); 2234 __ mtctr(klass); 2235 __ bctr(); 2236 2237 __ bind(L_skip_barrier); 2238 } 2239 2240 __ save_LR(r_temp_1); 2241 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame. 2242 __ mr(r_callers_sp, R1_SP); // Remember frame pointer. 2243 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame. 2244 2245 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2246 bs->nmethod_entry_barrier(masm, r_temp_1); 2247 2248 frame_done_pc = (intptr_t)__ pc(); 2249 2250 // Native nmethod wrappers never take possession of the oop arguments. 2251 // So the caller will gc the arguments. 2252 // The only thing we need an oopMap for is if the call is static. 2253 // 2254 // An OopMap for lock (and class if static), and one for the VM call itself. 2255 OopMapSet *oop_maps = new OopMapSet(); 2256 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2257 2258 // Move arguments from register/stack to register/stack. 2259 // -------------------------------------------------------------------------- 2260 // 2261 // We immediately shuffle the arguments so that for any vm call we have 2262 // to make from here on out (sync slow path, jvmti, etc.) we will have 2263 // captured the oops from our caller and have a valid oopMap for them. 2264 // 2265 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 2266 // (derived from JavaThread* which is in R16_thread) and, if static, 2267 // the class mirror instead of a receiver. 
This pretty much guarantees that 2268 // register layout will not match. We ignore these extra arguments during 2269 // the shuffle. The shuffle is described by the two calling convention 2270 // vectors we have in our possession. We simply walk the java vector to 2271 // get the source locations and the c vector to get the destinations. 2272 2273 // Record sp-based slot for receiver on stack for non-static methods. 2274 int receiver_offset = -1; 2275 2276 // We move the arguments backward because the floating point registers 2277 // destination will always be to a register with a greater or equal 2278 // register number or the stack. 2279 // in is the index of the incoming Java arguments 2280 // out is the index of the outgoing C arguments 2281 2282 #ifdef ASSERT 2283 bool reg_destroyed[Register::number_of_registers]; 2284 bool freg_destroyed[FloatRegister::number_of_registers]; 2285 for (int r = 0 ; r < Register::number_of_registers ; r++) { 2286 reg_destroyed[r] = false; 2287 } 2288 for (int f = 0 ; f < FloatRegister::number_of_registers ; f++) { 2289 freg_destroyed[f] = false; 2290 } 2291 #endif // ASSERT 2292 2293 for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) { 2294 2295 #ifdef ASSERT 2296 if (in_regs[in].first()->is_Register()) { 2297 assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!"); 2298 } else if (in_regs[in].first()->is_FloatRegister()) { 2299 assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!"); 2300 } 2301 if (out_regs[out].first()->is_Register()) { 2302 reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true; 2303 } else if (out_regs[out].first()->is_FloatRegister()) { 2304 freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true; 2305 } 2306 #endif // ASSERT 2307 2308 switch (in_sig_bt[in]) { 2309 case T_BOOLEAN: 2310 case T_CHAR: 2311 case T_BYTE: 2312 case T_SHORT: 2313 case T_INT: 2314 // Move int and do sign extension. 2315 int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2316 break; 2317 case T_LONG: 2318 long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2319 break; 2320 case T_ARRAY: 2321 case T_OBJECT: 2322 object_move(masm, stack_slots, 2323 oop_map, oop_handle_slot_offset, 2324 ((in == 0) && (!method_is_static)), &receiver_offset, 2325 in_regs[in], out_regs[out], 2326 r_callers_sp, r_temp_1, r_temp_2); 2327 break; 2328 case T_VOID: 2329 break; 2330 case T_FLOAT: 2331 float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2332 break; 2333 case T_DOUBLE: 2334 double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1); 2335 break; 2336 case T_ADDRESS: 2337 fatal("found type (T_ADDRESS) in java args"); 2338 break; 2339 default: 2340 ShouldNotReachHere(); 2341 break; 2342 } 2343 } 2344 2345 // Pre-load a static method's oop into ARG2. 2346 // Used both by locking code and the normal JNI call code. 2347 if (method_is_static) { 2348 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), 2349 r_carg2_classorobject); 2350 2351 // Now handlize the static class mirror in carg2. It's known not-null. 2352 __ std(r_carg2_classorobject, klass_offset, R1_SP); 2353 oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2354 __ addi(r_carg2_classorobject, R1_SP, klass_offset); 2355 } 2356 2357 // Get JNIEnv* which is first argument to native. 
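  // The JNIEnv is embedded in the JavaThread, so its address is just a fixed
  // offset from R16_thread.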
2358 __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset())); 2359 2360 // NOTE: 2361 // 2362 // We have all of the arguments setup at this point. 2363 // We MUST NOT touch any outgoing regs from this point on. 2364 // So if we must call out we must push a new frame. 2365 2366 // Get current pc for oopmap, and load it patchable relative to global toc. 2367 oopmap_pc = (intptr_t) __ pc(); 2368 __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true); 2369 2370 // We use the same pc/oopMap repeatedly when we call out. 2371 oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map); 2372 2373 // r_return_pc now has the pc loaded that we will use when we finally call 2374 // to native. 2375 2376 // Make sure that thread is non-volatile; it crosses a bunch of VM calls below. 2377 assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register"); 2378 2379 # if 0 2380 // DTrace method entry 2381 # endif 2382 2383 // Lock a synchronized method. 2384 // -------------------------------------------------------------------------- 2385 2386 if (method->is_synchronized()) { 2387 Register r_oop = r_temp_4; 2388 const Register r_box = r_temp_5; 2389 Label done, locked; 2390 2391 // Load the oop for the object or class. r_carg2_classorobject contains 2392 // either the handlized oop from the incoming arguments or the handlized 2393 // class mirror (if the method is static). 2394 __ ld(r_oop, 0, r_carg2_classorobject); 2395 2396 // Get the lock box slot's address. 2397 __ addi(r_box, R1_SP, lock_offset); 2398 2399 // Try fastpath for locking. 2400 if (LockingMode == LM_LIGHTWEIGHT) { 2401 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2402 __ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2403 } else { 2404 // fast_lock kills r_temp_1, r_temp_2, r_temp_3. 2405 __ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2406 } 2407 __ beq(CCR0, locked); 2408 2409 // None of the above fast optimizations worked so we have to get into the 2410 // slow case of monitor enter. Inline a special case of call_VM that 2411 // disallows any pending_exception. 2412 2413 // Save argument registers and leave room for C-compatible ABI_REG_ARGS. 2414 int frame_size = frame::native_abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes); 2415 __ mr(R11_scratch1, R1_SP); 2416 RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs); 2417 2418 // Do the call. 2419 __ set_last_Java_frame(R11_scratch1, r_return_pc); 2420 assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register"); 2421 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread); 2422 __ reset_last_Java_frame(); 2423 2424 RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs); 2425 2426 __ asm_assert_mem8_is_zero(thread_(pending_exception), 2427 "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C"); 2428 2429 __ bind(locked); 2430 } 2431 2432 // Use that pc we placed in r_return_pc a while back as the current frame anchor. 2433 __ set_last_Java_frame(R1_SP, r_return_pc); 2434 2435 // Publish thread state 2436 // -------------------------------------------------------------------------- 2437 2438 // Transition from _thread_in_Java to _thread_in_native. 
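  // Use a release barrier so that all preceding Java-visible stores are
  // observable before the new thread state becomes visible.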
2439 __ li(R0, _thread_in_native); 2440 __ release(); 2441 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2442 __ stw(R0, thread_(thread_state)); 2443 2444 2445 // The JNI call 2446 // -------------------------------------------------------------------------- 2447 __ call_c(native_func, relocInfo::runtime_call_type); 2448 2449 2450 // Now, we are back from the native code. 2451 2452 2453 // Unpack the native result. 2454 // -------------------------------------------------------------------------- 2455 2456 // For int-types, we do any needed sign-extension required. 2457 // Care must be taken that the return values (R3_RET and F1_RET) 2458 // will survive any VM calls for blocking or unlocking. 2459 // An OOP result (handle) is done specially in the slow-path code. 2460 2461 switch (ret_type) { 2462 case T_VOID: break; // Nothing to do! 2463 case T_FLOAT: break; // Got it where we want it (unless slow-path). 2464 case T_DOUBLE: break; // Got it where we want it (unless slow-path). 2465 case T_LONG: break; // Got it where we want it (unless slow-path). 2466 case T_OBJECT: break; // Really a handle. 2467 // Cannot de-handlize until after reclaiming jvm_lock. 2468 case T_ARRAY: break; 2469 2470 case T_BOOLEAN: { // 0 -> false(0); !0 -> true(1) 2471 __ normalize_bool(R3_RET); 2472 break; 2473 } 2474 case T_BYTE: { // sign extension 2475 __ extsb(R3_RET, R3_RET); 2476 break; 2477 } 2478 case T_CHAR: { // unsigned result 2479 __ andi(R3_RET, R3_RET, 0xffff); 2480 break; 2481 } 2482 case T_SHORT: { // sign extension 2483 __ extsh(R3_RET, R3_RET); 2484 break; 2485 } 2486 case T_INT: // nothing to do 2487 break; 2488 default: 2489 ShouldNotReachHere(); 2490 break; 2491 } 2492 2493 Label after_transition; 2494 2495 // Publish thread state 2496 // -------------------------------------------------------------------------- 2497 2498 // Switch thread to "native transition" state before reading the 2499 // synchronization state. This additional state is necessary because reading 2500 // and testing the synchronization state is not atomic w.r.t. GC, as this 2501 // scenario demonstrates: 2502 // - Java thread A, in _thread_in_native state, loads _not_synchronized 2503 // and is preempted. 2504 // - VM thread changes sync state to synchronizing and suspends threads 2505 // for GC. 2506 // - Thread A is resumed to finish this native method, but doesn't block 2507 // here since it didn't see any synchronization in progress, and escapes. 2508 2509 // Transition from _thread_in_native to _thread_in_native_trans. 2510 __ li(R0, _thread_in_native_trans); 2511 __ release(); 2512 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2513 __ stw(R0, thread_(thread_state)); 2514 2515 2516 // Must we block? 2517 // -------------------------------------------------------------------------- 2518 2519 // Block, if necessary, before resuming in _thread_in_Java state. 2520 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2521 { 2522 Label no_block, sync; 2523 2524 // Force this write out before the read below. 2525 if (!UseSystemMemoryBarrier) { 2526 __ fence(); 2527 } 2528 2529 Register sync_state_addr = r_temp_4; 2530 Register sync_state = r_temp_5; 2531 Register suspend_flags = r_temp_6; 2532 2533 // No synchronization in progress nor yet synchronized 2534 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path). 
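  // safepoint_poll branches to 'sync' if a safepoint or handshake operation is pending.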
2535 __ safepoint_poll(sync, sync_state, true /* at_return */, false /* in_nmethod */); 2536 2537 // Not suspended. 2538 // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size"); 2539 __ lwz(suspend_flags, thread_(suspend_flags)); 2540 __ cmpwi(CCR1, suspend_flags, 0); 2541 __ beq(CCR1, no_block); 2542 2543 // Block. Save any potential method result value before the operation and 2544 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this 2545 // lets us share the oopMap we used when we went native rather than create 2546 // a distinct one for this pc. 2547 __ bind(sync); 2548 __ isync(); 2549 2550 address entry_point = 2551 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans); 2552 save_native_result(masm, ret_type, workspace_slot_offset); 2553 __ call_VM_leaf(entry_point, R16_thread); 2554 restore_native_result(masm, ret_type, workspace_slot_offset); 2555 2556 __ bind(no_block); 2557 2558 // Publish thread state. 2559 // -------------------------------------------------------------------------- 2560 2561 // Thread state is thread_in_native_trans. Any safepoint blocking has 2562 // already happened so we can now change state to _thread_in_Java. 2563 2564 // Transition from _thread_in_native_trans to _thread_in_Java. 2565 __ li(R0, _thread_in_Java); 2566 __ lwsync(); // Acquire safepoint and suspend state, release thread state. 2567 // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); 2568 __ stw(R0, thread_(thread_state)); 2569 __ bind(after_transition); 2570 } 2571 2572 // Reguard any pages if necessary. 2573 // -------------------------------------------------------------------------- 2574 2575 Label no_reguard; 2576 __ lwz(r_temp_1, thread_(stack_guard_state)); 2577 __ cmpwi(CCR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled); 2578 __ bne(CCR0, no_reguard); 2579 2580 save_native_result(masm, ret_type, workspace_slot_offset); 2581 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); 2582 restore_native_result(masm, ret_type, workspace_slot_offset); 2583 2584 __ bind(no_reguard); 2585 2586 2587 // Unlock 2588 // -------------------------------------------------------------------------- 2589 2590 if (method->is_synchronized()) { 2591 const Register r_oop = r_temp_4; 2592 const Register r_box = r_temp_5; 2593 const Register r_exception = r_temp_6; 2594 Label done; 2595 2596 // Get oop and address of lock object box. 2597 if (method_is_static) { 2598 assert(klass_offset != -1, ""); 2599 __ ld(r_oop, klass_offset, R1_SP); 2600 } else { 2601 assert(receiver_offset != -1, ""); 2602 __ ld(r_oop, receiver_offset, R1_SP); 2603 } 2604 __ addi(r_box, R1_SP, lock_offset); 2605 2606 // Try fastpath for unlocking. 2607 if (LockingMode == LM_LIGHTWEIGHT) { 2608 __ compiler_fast_unlock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2609 } else { 2610 __ compiler_fast_unlock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3); 2611 } 2612 __ beq(CCR0, done); 2613 2614 // Save and restore any potential method result value around the unlocking operation. 2615 save_native_result(masm, ret_type, workspace_slot_offset); 2616 2617 // Must save pending exception around the slow-path VM call. Since it's a 2618 // leaf call, the pending exception (if any) can be kept in a register. 
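    // Stash it in r_exception, clear the field for the duration of the call,
    // and write it back after restore_native_result below.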
    __ ld(r_exception, thread_(pending_exception));
    assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
    __ li(R0, 0);
    __ std(R0, thread_(pending_exception));

    // Slow case of monitor exit.
    // Inline a special case of call_VM that disallows any pending_exception.
    // Arguments are (oop obj, BasicLock* lock, JavaThread* thread).
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);

    __ asm_assert_mem8_is_zero(thread_(pending_exception),
                               "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C");

    restore_native_result(masm, ret_type, workspace_slot_offset);

    // Check_forward_pending_exception jumps to forward_exception if any pending
    // exception is set. The forward_exception routine expects to see the
    // exception in pending_exception and not in a register. Kind of clumsy,
    // since all folks who branch to forward_exception must have tested
    // pending_exception first and hence have it in a register already.
    __ std(r_exception, thread_(pending_exception));

    __ bind(done);
  }

# if 0
  // DTrace method exit
# endif

  // Clear "last Java frame" SP and PC.
  // --------------------------------------------------------------------------

  __ reset_last_Java_frame();

  // Unbox oop result, e.g. JNIHandles::resolve value.
  // --------------------------------------------------------------------------

  if (is_reference_type(ret_type)) {
    __ resolve_jobject(R3_RET, r_temp_1, r_temp_2, MacroAssembler::PRESERVATION_NONE);
  }

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ load_const_optimized(R0, 0L);
    __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
  }

  // Reset handle block.
  // --------------------------------------------------------------------------
  __ ld(r_temp_1, thread_(active_handles));
  // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
  __ li(r_temp_2, 0);
  __ stw(r_temp_2, in_bytes(JNIHandleBlock::top_offset()), r_temp_1);


  // Check for pending exceptions.
  // --------------------------------------------------------------------------
  __ ld(r_temp_2, thread_(pending_exception));
  __ cmpdi(CCR0, r_temp_2, 0);
  __ bne(CCR0, handle_pending_exception);

  // Return
  // --------------------------------------------------------------------------

  __ pop_frame();
  __ restore_LR(R11);
  __ blr();


  // Handler for pending exceptions (out-of-line).
  // --------------------------------------------------------------------------
  // Since this is a native call, we know the proper exception handler
  // is the empty function. We just pop this frame and then jump to
  // forward_exception_entry.
  __ bind(handle_pending_exception);

  __ pop_frame();
  __ restore_LR(R11);
  __ b64_patchable((address)StubRoutines::forward_exception_entry(),
                   relocInfo::runtime_call_type);

  // Done.
2701 // -------------------------------------------------------------------------- 2702 2703 __ flush(); 2704 2705 nmethod *nm = nmethod::new_native_nmethod(method, 2706 compile_id, 2707 masm->code(), 2708 vep_start_pc-start_pc, 2709 frame_done_pc-start_pc, 2710 stack_slots / VMRegImpl::slots_per_word, 2711 (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2712 in_ByteSize(lock_offset), 2713 oop_maps); 2714 2715 return nm; 2716 } 2717 2718 // This function returns the adjust size (in number of words) to a c2i adapter 2719 // activation for use during deoptimization. 2720 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2721 return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::frame_alignment_in_words); 2722 } 2723 2724 uint SharedRuntime::in_preserve_stack_slots() { 2725 return frame::jit_in_preserve_size / VMRegImpl::stack_slot_size; 2726 } 2727 2728 uint SharedRuntime::out_preserve_stack_slots() { 2729 #if defined(COMPILER1) || defined(COMPILER2) 2730 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size; 2731 #else 2732 return 0; 2733 #endif 2734 } 2735 2736 #if defined(COMPILER1) || defined(COMPILER2) 2737 // Frame generation for deopt and uncommon trap blobs. 2738 static void push_skeleton_frame(MacroAssembler* masm, bool deopt, 2739 /* Read */ 2740 Register unroll_block_reg, 2741 /* Update */ 2742 Register frame_sizes_reg, 2743 Register number_of_frames_reg, 2744 Register pcs_reg, 2745 /* Invalidate */ 2746 Register frame_size_reg, 2747 Register pc_reg) { 2748 2749 __ ld(pc_reg, 0, pcs_reg); 2750 __ ld(frame_size_reg, 0, frame_sizes_reg); 2751 __ std(pc_reg, _abi0(lr), R1_SP); 2752 __ push_frame(frame_size_reg, R0/*tmp*/); 2753 __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP); 2754 __ addi(number_of_frames_reg, number_of_frames_reg, -1); 2755 __ addi(frame_sizes_reg, frame_sizes_reg, wordSize); 2756 __ addi(pcs_reg, pcs_reg, wordSize); 2757 } 2758 2759 // Loop through the UnrollBlock info and create new frames. 2760 static void push_skeleton_frames(MacroAssembler* masm, bool deopt, 2761 /* read */ 2762 Register unroll_block_reg, 2763 /* invalidate */ 2764 Register frame_sizes_reg, 2765 Register number_of_frames_reg, 2766 Register pcs_reg, 2767 Register frame_size_reg, 2768 Register pc_reg) { 2769 Label loop; 2770 2771 // _number_of_frames is of type int (deoptimization.hpp) 2772 __ lwa(number_of_frames_reg, 2773 in_bytes(Deoptimization::UnrollBlock::number_of_frames_offset()), 2774 unroll_block_reg); 2775 __ ld(pcs_reg, 2776 in_bytes(Deoptimization::UnrollBlock::frame_pcs_offset()), 2777 unroll_block_reg); 2778 __ ld(frame_sizes_reg, 2779 in_bytes(Deoptimization::UnrollBlock::frame_sizes_offset()), 2780 unroll_block_reg); 2781 2782 // stack: (caller_of_deoptee, ...). 2783 2784 // At this point we either have an interpreter frame or a compiled 2785 // frame on top of stack. If it is a compiled frame we push a new c2i 2786 // adapter here 2787 2788 // Memorize top-frame stack-pointer. 2789 __ mr(frame_size_reg/*old_sp*/, R1_SP); 2790 2791 // Resize interpreter top frame OR C2I adapter. 2792 2793 // At this moment, the top frame (which is the caller of the deoptee) is 2794 // an interpreter frame or a newly pushed C2I adapter or an entry frame. 2795 // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the 2796 // outgoing arguments. 
2797 // 2798 // In order to push the interpreter frame for the deoptee, we need to 2799 // resize the top frame such that we are able to place the deoptee's 2800 // locals in the frame. 2801 // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI 2802 // into a valid PARENT_IJAVA_FRAME_ABI. 2803 2804 __ lwa(R11_scratch1, 2805 in_bytes(Deoptimization::UnrollBlock::caller_adjustment_offset()), 2806 unroll_block_reg); 2807 __ neg(R11_scratch1, R11_scratch1); 2808 2809 // R11_scratch1 contains size of locals for frame resizing. 2810 // R12_scratch2 contains top frame's lr. 2811 2812 // Resize frame by complete frame size prevents TOC from being 2813 // overwritten by locals. A more stack space saving way would be 2814 // to copy the TOC to its location in the new abi. 2815 __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size); 2816 2817 // now, resize the frame 2818 __ resize_frame(R11_scratch1, pc_reg/*tmp*/); 2819 2820 // In the case where we have resized a c2i frame above, the optional 2821 // alignment below the locals has size 32 (why?). 2822 __ std(R12_scratch2, _abi0(lr), R1_SP); 2823 2824 // Initialize initial_caller_sp. 2825 __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP); 2826 2827 #ifdef ASSERT 2828 // Make sure that there is at least one entry in the array. 2829 __ cmpdi(CCR0, number_of_frames_reg, 0); 2830 __ asm_assert_ne("array_size must be > 0"); 2831 #endif 2832 2833 // Now push the new interpreter frames. 2834 // 2835 __ bind(loop); 2836 // Allocate a new frame, fill in the pc. 2837 push_skeleton_frame(masm, deopt, 2838 unroll_block_reg, 2839 frame_sizes_reg, 2840 number_of_frames_reg, 2841 pcs_reg, 2842 frame_size_reg, 2843 pc_reg); 2844 __ cmpdi(CCR0, number_of_frames_reg, 0); 2845 __ bne(CCR0, loop); 2846 2847 // Get the return address pointing into the frame manager. 2848 __ ld(R0, 0, pcs_reg); 2849 // Store it in the top interpreter frame. 2850 __ std(R0, _abi0(lr), R1_SP); 2851 // Initialize frame_manager_lr of interpreter top frame. 2852 } 2853 #endif 2854 2855 void SharedRuntime::generate_deopt_blob() { 2856 // Allocate space for the code 2857 ResourceMark rm; 2858 // Setup code generation tools 2859 const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id); 2860 CodeBuffer buffer(name, 2048, 1024); 2861 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 2862 Label exec_mode_initialized; 2863 int frame_size_in_words; 2864 OopMap* map = nullptr; 2865 OopMapSet *oop_maps = new OopMapSet(); 2866 2867 // size of ABI112 plus spill slots for R3_RET and F1_RET. 2868 const int frame_size_in_bytes = frame::native_abi_reg_args_spill_size; 2869 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 2870 int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info. 2871 2872 const Register exec_mode_reg = R21_tmp1; 2873 2874 const address start = __ pc(); 2875 2876 #if defined(COMPILER1) || defined(COMPILER2) 2877 // -------------------------------------------------------------------------- 2878 // Prolog for non exception case! 2879 2880 // We have been called from the deopt handler of the deoptee. 2881 // 2882 // deoptee: 2883 // ... 2884 // call X 2885 // ... 2886 // deopt_handler: call_deopt_stub 2887 // cur. return pc --> ... 2888 // 2889 // So currently SR_LR points behind the call in the deopt handler. 2890 // We adjust it such that it points to the start of the deopt handler. 
2891 // The return_pc has been stored in the frame of the deoptee and 2892 // will replace the address of the deopt_handler in the call 2893 // to Deoptimization::fetch_unroll_info below. 2894 // We can't grab a free register here, because all registers may 2895 // contain live values, so let the RegisterSaver do the adjustment 2896 // of the return pc. 2897 const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size; 2898 2899 // Push the "unpack frame" 2900 // Save everything in sight. 2901 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2902 &first_frame_size_in_bytes, 2903 /*generate_oop_map=*/ true, 2904 return_pc_adjustment_no_exception, 2905 RegisterSaver::return_pc_is_lr); 2906 assert(map != nullptr, "OopMap must have been created"); 2907 2908 __ li(exec_mode_reg, Deoptimization::Unpack_deopt); 2909 // Save exec mode for unpack_frames. 2910 __ b(exec_mode_initialized); 2911 2912 // -------------------------------------------------------------------------- 2913 // Prolog for exception case 2914 2915 // An exception is pending. 2916 // We have been called with a return (interpreter) or a jump (exception blob). 2917 // 2918 // - R3_ARG1: exception oop 2919 // - R4_ARG2: exception pc 2920 2921 int exception_offset = __ pc() - start; 2922 2923 BLOCK_COMMENT("Prolog for exception case"); 2924 2925 // Store exception oop and pc in thread (location known to GC). 2926 // This is needed since the call to "fetch_unroll_info()" may safepoint. 2927 __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2928 __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2929 __ std(R4_ARG2, _abi0(lr), R1_SP); 2930 2931 // Vanilla deoptimization with an exception pending in exception_oop. 2932 int exception_in_tls_offset = __ pc() - start; 2933 2934 // Push the "unpack frame". 2935 // Save everything in sight. 2936 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2937 &first_frame_size_in_bytes, 2938 /*generate_oop_map=*/ false, 2939 /*return_pc_adjustment_exception=*/ 0, 2940 RegisterSaver::return_pc_is_pre_saved); 2941 2942 // Deopt during an exception. Save exec mode for unpack_frames. 2943 __ li(exec_mode_reg, Deoptimization::Unpack_exception); 2944 2945 // fall through 2946 2947 int reexecute_offset = 0; 2948 #ifdef COMPILER1 2949 __ b(exec_mode_initialized); 2950 2951 // Reexecute entry, similar to c2 uncommon trap 2952 reexecute_offset = __ pc() - start; 2953 2954 RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 2955 &first_frame_size_in_bytes, 2956 /*generate_oop_map=*/ false, 2957 /*return_pc_adjustment_reexecute=*/ 0, 2958 RegisterSaver::return_pc_is_pre_saved); 2959 __ li(exec_mode_reg, Deoptimization::Unpack_reexecute); 2960 #endif 2961 2962 // -------------------------------------------------------------------------- 2963 __ BIND(exec_mode_initialized); 2964 2965 const Register unroll_block_reg = R22_tmp2; 2966 2967 // We need to set `last_Java_frame' because `fetch_unroll_info' will 2968 // call `last_Java_frame()'. The value of the pc in the frame is not 2969 // particularly important. It just needs to identify this blob. 2970 __ set_last_Java_frame(R1_SP, noreg); 2971 2972 // With EscapeAnalysis turned on, this call may safepoint! 
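  // That is why an oop map was generated for the register save above; it is
  // registered for this call site right below.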
2973 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg); 2974 address calls_return_pc = __ last_calls_return_pc(); 2975 // Set an oopmap for the call site that describes all our saved registers. 2976 oop_maps->add_gc_map(calls_return_pc - start, map); 2977 2978 __ reset_last_Java_frame(); 2979 // Save the return value. 2980 __ mr(unroll_block_reg, R3_RET); 2981 2982 // Restore only the result registers that have been saved 2983 // by save_volatile_registers(...). 2984 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes); 2985 2986 // reload the exec mode from the UnrollBlock (it might have changed) 2987 __ lwz(exec_mode_reg, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg); 2988 // In excp_deopt_mode, restore and clear exception oop which we 2989 // stored in the thread during exception entry above. The exception 2990 // oop will be the return value of this stub. 2991 Label skip_restore_excp; 2992 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception); 2993 __ bne(CCR0, skip_restore_excp); 2994 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2995 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2996 __ li(R0, 0); 2997 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread); 2998 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread); 2999 __ BIND(skip_restore_excp); 3000 3001 __ pop_frame(); 3002 3003 // stack: (deoptee, optional i2c, caller of deoptee, ...). 3004 3005 // pop the deoptee's frame 3006 __ pop_frame(); 3007 3008 // stack: (caller_of_deoptee, ...). 3009 3010 // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled. 3011 // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info()) 3012 // and the frame is effectively not resized. 3013 Register caller_sp = R23_tmp3; 3014 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg); 3015 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5); 3016 3017 // Loop through the `UnrollBlock' info and create interpreter frames. 3018 push_skeleton_frames(masm, true/*deopt*/, 3019 unroll_block_reg, 3020 R23_tmp3, 3021 R24_tmp4, 3022 R25_tmp5, 3023 R26_tmp6, 3024 R27_tmp7); 3025 3026 // stack: (skeletal interpreter frame, ..., optional skeletal 3027 // interpreter frame, optional c2i, caller of deoptee, ...). 3028 3029 // push an `unpack_frame' taking care of float / int return values. 3030 __ push_frame(frame_size_in_bytes, R0/*tmp*/); 3031 3032 // stack: (unpack frame, skeletal interpreter frame, ..., optional 3033 // skeletal interpreter frame, optional c2i, caller of deoptee, 3034 // ...). 3035 3036 // Spill live volatile registers since we'll do a call. 3037 __ std( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP); 3038 __ stfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP); 3039 3040 // Let the unpacker layout information in the skeletal frames just 3041 // allocated. 3042 __ calculate_address_from_global_toc(R3_RET, calls_return_pc, true, true, true, true); 3043 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET); 3044 // This is a call to a LEAF method, so no oop map is required. 3045 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 3046 R16_thread/*thread*/, exec_mode_reg/*exec_mode*/); 3047 __ reset_last_Java_frame(); 3048 3049 // Restore the volatiles saved above. 
3050 __ ld( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP); 3051 __ lfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP); 3052 3053 // Pop the unpack frame. 3054 __ pop_frame(); 3055 __ restore_LR(R0); 3056 3057 // stack: (top interpreter frame, ..., optional interpreter frame, 3058 // optional c2i, caller of deoptee, ...). 3059 3060 // Initialize R14_state. 3061 __ restore_interpreter_state(R11_scratch1); 3062 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 3063 3064 // Return to the interpreter entry point. 3065 __ blr(); 3066 __ flush(); 3067 #else // COMPILER2 3068 __ unimplemented("deopt blob needed only with compiler"); 3069 int exception_offset = __ pc() - start; 3070 #endif // COMPILER2 3071 3072 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 3073 reexecute_offset, first_frame_size_in_bytes / wordSize); 3074 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 3075 } 3076 3077 #ifdef COMPILER2 3078 void OptoRuntime::generate_uncommon_trap_blob() { 3079 // Allocate space for the code. 3080 ResourceMark rm; 3081 // Setup code generation tools. 3082 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024); 3083 InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); 3084 address start = __ pc(); 3085 3086 Register unroll_block_reg = R21_tmp1; 3087 Register klass_index_reg = R22_tmp2; 3088 Register unc_trap_reg = R23_tmp3; 3089 Register r_return_pc = R27_tmp7; 3090 3091 OopMapSet* oop_maps = new OopMapSet(); 3092 int frame_size_in_bytes = frame::native_abi_reg_args_size; 3093 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 3094 3095 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 3096 3097 // Push a dummy `unpack_frame' and call 3098 // `Deoptimization::uncommon_trap' to pack the compiled frame into a 3099 // vframe array and return the `UnrollBlock' information. 3100 3101 // Save LR to compiled frame. 3102 __ save_LR(R11_scratch1); 3103 3104 // Push an "uncommon_trap" frame. 3105 __ push_frame_reg_args(0, R11_scratch1); 3106 3107 // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...). 3108 3109 // Set the `unpack_frame' as last_Java_frame. 3110 // `Deoptimization::uncommon_trap' expects it and considers its 3111 // sender frame as the deoptee frame. 3112 // Remember the offset of the instruction whose address will be 3113 // moved to R11_scratch1. 3114 address gc_map_pc = __ pc(); 3115 __ calculate_address_from_global_toc(r_return_pc, gc_map_pc, true, true, true, true); 3116 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc); 3117 3118 __ mr(klass_index_reg, R3); 3119 __ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap); 3120 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), 3121 R16_thread, klass_index_reg, R5_ARG3); 3122 3123 // Set an oopmap for the call site. 3124 oop_maps->add_gc_map(gc_map_pc - start, map); 3125 3126 __ reset_last_Java_frame(); 3127 3128 // Pop the `unpack frame'. 3129 __ pop_frame(); 3130 3131 // stack: (deoptee, optional i2c, caller_of_deoptee, ...). 3132 3133 // Save the return value. 3134 __ mr(unroll_block_reg, R3_RET); 3135 3136 // Pop the uncommon_trap frame. 3137 __ pop_frame(); 3138 3139 // stack: (caller_of_deoptee, ...). 
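  // In debug builds, verify that the UnrollBlock really describes an uncommon trap.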
3140 3141 #ifdef ASSERT 3142 __ lwz(R22_tmp2, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg); 3143 __ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap); 3144 __ asm_assert_eq("OptoRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap"); 3145 #endif 3146 3147 // Freezing continuation frames requires that the caller is trimmed to unextended sp if compiled. 3148 // If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info()) 3149 // and the frame is effectively not resized. 3150 Register caller_sp = R23_tmp3; 3151 __ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg); 3152 __ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5); 3153 3154 // Allocate new interpreter frame(s) and possibly a c2i adapter 3155 // frame. 3156 push_skeleton_frames(masm, false/*deopt*/, 3157 unroll_block_reg, 3158 R22_tmp2, 3159 R23_tmp3, 3160 R24_tmp4, 3161 R25_tmp5, 3162 R26_tmp6); 3163 3164 // stack: (skeletal interpreter frame, ..., optional skeletal 3165 // interpreter frame, optional c2i, caller of deoptee, ...). 3166 3167 // Push a dummy `unpack_frame' taking care of float return values. 3168 // Call `Deoptimization::unpack_frames' to layout information in the 3169 // interpreter frames just created. 3170 3171 // Push a simple "unpack frame" here. 3172 __ push_frame_reg_args(0, R11_scratch1); 3173 3174 // stack: (unpack frame, skeletal interpreter frame, ..., optional 3175 // skeletal interpreter frame, optional c2i, caller of deoptee, 3176 // ...). 3177 3178 // Set the "unpack_frame" as last_Java_frame. 3179 __ set_last_Java_frame(/*sp*/R1_SP, r_return_pc); 3180 3181 // Indicate it is the uncommon trap case. 3182 __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap); 3183 // Let the unpacker layout information in the skeletal frames just 3184 // allocated. 3185 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), 3186 R16_thread, unc_trap_reg); 3187 3188 __ reset_last_Java_frame(); 3189 // Pop the `unpack frame'. 3190 __ pop_frame(); 3191 // Restore LR from top interpreter frame. 3192 __ restore_LR(R11_scratch1); 3193 3194 // stack: (top interpreter frame, ..., optional interpreter frame, 3195 // optional c2i, caller of deoptee, ...). 3196 3197 __ restore_interpreter_state(R11_scratch1); 3198 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 3199 3200 // Return to the interpreter entry point. 3201 __ blr(); 3202 3203 masm->flush(); 3204 3205 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize); 3206 } 3207 #endif // COMPILER2 3208 3209 // Generate a special Compile2Runtime blob that saves all registers, and setup oopmap. 3210 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) { 3211 assert(StubRoutines::forward_exception_entry() != nullptr, 3212 "must be generated before"); 3213 assert(is_polling_page_id(id), "expected a polling page stub id"); 3214 3215 ResourceMark rm; 3216 OopMapSet *oop_maps = new OopMapSet(); 3217 OopMap* map; 3218 3219 // Allocate space for the code. Setup code generation tools. 
3220 const char* name = SharedRuntime::stub_name(id); 3221 CodeBuffer buffer(name, 2048, 1024); 3222 MacroAssembler* masm = new MacroAssembler(&buffer); 3223 3224 address start = __ pc(); 3225 int frame_size_in_bytes = 0; 3226 3227 RegisterSaver::ReturnPCLocation return_pc_location; 3228 bool cause_return = (id == SharedStubId::polling_page_return_handler_id); 3229 if (cause_return) { 3230 // Nothing to do here. The frame has already been popped in MachEpilogNode. 3231 // Register LR already contains the return pc. 3232 return_pc_location = RegisterSaver::return_pc_is_pre_saved; 3233 } else { 3234 // Use thread()->saved_exception_pc() as return pc. 3235 return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc; 3236 } 3237 3238 bool save_vectors = (id == SharedStubId::polling_page_vectors_safepoint_handler_id); 3239 3240 // Save registers, fpu state, and flags. Set R31 = return pc. 3241 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3242 &frame_size_in_bytes, 3243 /*generate_oop_map=*/ true, 3244 /*return_pc_adjustment=*/0, 3245 return_pc_location, save_vectors); 3246 3247 // The following is basically a call_VM. However, we need the precise 3248 // address of the call in order to generate an oopmap. Hence, we do all the 3249 // work ourselves. 3250 __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg); 3251 3252 // The return address must always be correct so that the frame constructor 3253 // never sees an invalid pc. 3254 3255 // Do the call 3256 __ call_VM_leaf(call_ptr, R16_thread); 3257 address calls_return_pc = __ last_calls_return_pc(); 3258 3259 // Set an oopmap for the call site. This oopmap will map all 3260 // oop-registers and debug-info registers as callee-saved. This 3261 // will allow deoptimization at this safepoint to find all possible 3262 // debug-info recordings, as well as let GC find all oops. 3263 oop_maps->add_gc_map(calls_return_pc - start, map); 3264 3265 Label noException; 3266 3267 // Clear the last Java frame. 3268 __ reset_last_Java_frame(); 3269 3270 BLOCK_COMMENT(" Check pending exception."); 3271 const Register pending_exception = R0; 3272 __ ld(pending_exception, thread_(pending_exception)); 3273 __ cmpdi(CCR0, pending_exception, 0); 3274 __ beq(CCR0, noException); 3275 3276 // Exception pending 3277 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3278 frame_size_in_bytes, 3279 /*restore_ctr=*/true, save_vectors); 3280 3281 BLOCK_COMMENT(" Jump to forward_exception_entry."); 3282 // Jump to forward_exception_entry, with the issuing PC in LR 3283 // so it looks like the original nmethod called forward_exception_entry. 3284 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3285 3286 // No exception case. 3287 __ BIND(noException); 3288 3289 if (!cause_return) { 3290 Label no_adjust; 3291 // If our stashed return pc was modified by the runtime we avoid touching it 3292 __ ld(R0, frame_size_in_bytes + _abi0(lr), R1_SP); 3293 __ cmpd(CCR0, R0, R31); 3294 __ bne(CCR0, no_adjust); 3295 3296 // Adjust return pc forward to step over the safepoint poll instruction 3297 __ addi(R31, R31, 4); 3298 __ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP); 3299 3300 __ bind(no_adjust); 3301 } 3302 3303 // Normal exit, restore registers and exit. 
3304 RegisterSaver::restore_live_registers_and_pop_frame(masm, 3305 frame_size_in_bytes, 3306 /*restore_ctr=*/true, save_vectors); 3307 3308 __ blr(); 3309 3310 // Make sure all code is generated 3311 masm->flush(); 3312 3313 // Fill-out other meta info 3314 // CodeBlob frame size is in words. 3315 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize); 3316 } 3317 3318 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss) 3319 // 3320 // Generate a stub that calls into the vm to find out the proper destination 3321 // of a java call. All the argument registers are live at this point 3322 // but since this is generic code we don't know what they are and the caller 3323 // must do any gc of the args. 3324 // 3325 RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) { 3326 assert(is_resolve_id(id), "expected a resolve stub id"); 3327 3328 // allocate space for the code 3329 ResourceMark rm; 3330 3331 const char* name = SharedRuntime::stub_name(id); 3332 CodeBuffer buffer(name, 1000, 512); 3333 MacroAssembler* masm = new MacroAssembler(&buffer); 3334 3335 int frame_size_in_bytes; 3336 3337 OopMapSet *oop_maps = new OopMapSet(); 3338 OopMap* map = nullptr; 3339 3340 address start = __ pc(); 3341 3342 map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, 3343 &frame_size_in_bytes, 3344 /*generate_oop_map*/ true, 3345 /*return_pc_adjustment*/ 0, 3346 RegisterSaver::return_pc_is_lr); 3347 3348 // Use noreg as last_Java_pc, the return pc will be reconstructed 3349 // from the physical frame. 3350 __ set_last_Java_frame(/*sp*/R1_SP, noreg); 3351 3352 int frame_complete = __ offset(); 3353 3354 // Pass R19_method as 2nd (optional) argument, used by 3355 // counter_overflow_stub. 3356 __ call_VM_leaf(destination, R16_thread, R19_method); 3357 address calls_return_pc = __ last_calls_return_pc(); 3358 // Set an oopmap for the call site. 3359 // We need this not only for callee-saved registers, but also for volatile 3360 // registers that the compiler might be keeping live across a safepoint. 3361 // Create the oopmap for the call's return pc. 3362 oop_maps->add_gc_map(calls_return_pc - start, map); 3363 3364 // R3_RET contains the address we are going to jump to assuming no exception got installed. 3365 3366 // clear last_Java_sp 3367 __ reset_last_Java_frame(); 3368 3369 // Check for pending exceptions. 3370 BLOCK_COMMENT("Check for pending exceptions."); 3371 Label pending; 3372 __ ld(R11_scratch1, thread_(pending_exception)); 3373 __ cmpdi(CCR0, R11_scratch1, 0); 3374 __ bne(CCR0, pending); 3375 3376 __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame. 3377 3378 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false); 3379 3380 // Get the returned method. 3381 __ get_vm_result_2(R19_method); 3382 3383 __ bctr(); 3384 3385 3386 // Pending exception after the safepoint. 3387 __ BIND(pending); 3388 3389 RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true); 3390 3391 // exception pending => remove activation and forward to exception handler 3392 3393 __ li(R11_scratch1, 0); 3394 __ ld(R3_ARG1, thread_(pending_exception)); 3395 __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread); 3396 __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 3397 3398 // ------------- 3399 // Make sure all code is generated. 
3400 masm->flush(); 3401 3402 // return the blob 3403 // frame_size_words or bytes?? 3404 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize, 3405 oop_maps, true); 3406 } 3407 3408 // Continuation point for throwing of implicit exceptions that are 3409 // not handled in the current activation. Fabricates an exception 3410 // oop and initiates normal exception dispatching in this 3411 // frame. Only callee-saved registers are preserved (through the 3412 // normal register window / RegisterMap handling). If the compiler 3413 // needs all registers to be preserved between the fault point and 3414 // the exception handler then it must assume responsibility for that 3415 // in AbstractCompiler::continuation_for_implicit_null_exception or 3416 // continuation_for_implicit_division_by_zero_exception. All other 3417 // implicit exceptions (e.g., NullPointerException or 3418 // AbstractMethodError on entry) are either at call sites or 3419 // otherwise assume that stack unwinding will be initiated, so 3420 // caller saved registers were assumed volatile in the compiler. 3421 // 3422 // Note that we generate only this stub into a RuntimeStub, because 3423 // it needs to be properly traversed and ignored during GC, so we 3424 // change the meaning of the "__" macro within this method. 3425 // 3426 // Note: the routine set_pc_not_at_call_for_caller in 3427 // SharedRuntime.cpp requires that this code be generated into a 3428 // RuntimeStub. 3429 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) { 3430 assert(is_throw_id(id), "expected a throw stub id"); 3431 3432 const char* name = SharedRuntime::stub_name(id); 3433 3434 ResourceMark rm; 3435 const char* timer_msg = "SharedRuntime generate_throw_exception"; 3436 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); 3437 3438 CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0); 3439 MacroAssembler* masm = new MacroAssembler(&code); 3440 3441 OopMapSet* oop_maps = new OopMapSet(); 3442 int frame_size_in_bytes = frame::native_abi_reg_args_size; 3443 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 3444 3445 address start = __ pc(); 3446 3447 __ save_LR(R11_scratch1); 3448 3449 // Push a frame. 3450 __ push_frame_reg_args(0, R11_scratch1); 3451 3452 address frame_complete_pc = __ pc(); 3453 3454 // Note that we always have a runtime stub frame on the top of 3455 // stack by this point. Remember the offset of the instruction 3456 // whose address will be moved to R11_scratch1. 3457 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); 3458 3459 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); 3460 3461 __ mr(R3_ARG1, R16_thread); 3462 __ call_c(runtime_entry); 3463 3464 // Set an oopmap for the call site. 3465 oop_maps->add_gc_map((int)(gc_map_pc - start), map); 3466 3467 __ reset_last_Java_frame(); 3468 3469 #ifdef ASSERT 3470 // Make sure that this code is only executed if there is a pending 3471 // exception. 3472 { 3473 Label L; 3474 __ ld(R0, 3475 in_bytes(Thread::pending_exception_offset()), 3476 R16_thread); 3477 __ cmpdi(CCR0, R0, 0); 3478 __ bne(CCR0, L); 3479 __ stop("SharedRuntime::throw_exception: no pending exception"); 3480 __ bind(L); 3481 } 3482 #endif 3483 3484 // Pop frame. 3485 __ pop_frame(); 3486 3487 __ restore_LR(R11_scratch1); 3488 3489 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry()); 3490 __ mtctr(R11_scratch1); 3491 __ bctr(); 3492 3493 // Create runtime stub with OopMap. 
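  // frame_complete is the code offset at which the frame has been fully
  // pushed; the frame size handed to the blob is given in words.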
3494 RuntimeStub* stub = 3495 RuntimeStub::new_runtime_stub(name, &code, 3496 /*frame_complete=*/ (int)(frame_complete_pc - start), 3497 frame_size_in_bytes/wordSize, 3498 oop_maps, 3499 false); 3500 return stub; 3501 } 3502 3503 //------------------------------Montgomery multiplication------------------------ 3504 // 3505 3506 // Subtract 0:b from carry:a. Return carry. 3507 static unsigned long 3508 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) { 3509 long i = 0; 3510 unsigned long tmp, tmp2; 3511 __asm__ __volatile__ ( 3512 "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA 3513 "mtctr %[len] \n" 3514 "0: \n" 3515 "ldx %[tmp], %[i], %[a] \n" 3516 "ldx %[tmp2], %[i], %[b] \n" 3517 "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended 3518 "stdx %[tmp], %[i], %[a] \n" 3519 "addi %[i], %[i], 8 \n" 3520 "bdnz 0b \n" 3521 "addme %[tmp], %[carry] \n" // carry + CA - 1 3522 : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2) 3523 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len) 3524 : "ctr", "xer", "memory" 3525 ); 3526 return tmp; 3527 } 3528 3529 // Multiply (unsigned) Long A by Long B, accumulating the double- 3530 // length result into the accumulator formed of T0, T1, and T2. 3531 inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3532 unsigned long hi, lo; 3533 __asm__ __volatile__ ( 3534 "mulld %[lo], %[A], %[B] \n" 3535 "mulhdu %[hi], %[A], %[B] \n" 3536 "addc %[T0], %[T0], %[lo] \n" 3537 "adde %[T1], %[T1], %[hi] \n" 3538 "addze %[T2], %[T2] \n" 3539 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3540 : [A]"r"(A), [B]"r"(B) 3541 : "xer" 3542 ); 3543 } 3544 3545 // As above, but add twice the double-length result into the 3546 // accumulator. 3547 inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 3548 unsigned long hi, lo; 3549 __asm__ __volatile__ ( 3550 "mulld %[lo], %[A], %[B] \n" 3551 "mulhdu %[hi], %[A], %[B] \n" 3552 "addc %[T0], %[T0], %[lo] \n" 3553 "adde %[T1], %[T1], %[hi] \n" 3554 "addze %[T2], %[T2] \n" 3555 "addc %[T0], %[T0], %[lo] \n" 3556 "adde %[T1], %[T1], %[hi] \n" 3557 "addze %[T2], %[T2] \n" 3558 : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 3559 : [A]"r"(A), [B]"r"(B) 3560 : "xer" 3561 ); 3562 } 3563 3564 // Fast Montgomery multiplication. The derivation of the algorithm is 3565 // in "A Cryptographic Library for the Motorola DSP56000, 3566 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237". 
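// In outline, the routine below computes m = a * b * R^-1 (mod n), where
// R = 2^(64*len) and inv = -n^-1 (mod 2^64), as checked against n[0] by the
// assert below.  In each outer iteration the low 64-bit word of the
// triple-precision accumulator t0:t1:t2 is cancelled by adding m[i] * n with
// m[i] = t0 * inv, so the accumulator can be shifted down by one word.  The
// trailing sub() loop propagates any carry out of the top word by
// subtracting n until the result fits in len words.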
3567 static void 3568 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[], 3569 unsigned long m[], unsigned long inv, int len) { 3570 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 3571 int i; 3572 3573 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 3574 3575 for (i = 0; i < len; i++) { 3576 int j; 3577 for (j = 0; j < i; j++) { 3578 MACC(a[j], b[i-j], t0, t1, t2); 3579 MACC(m[j], n[i-j], t0, t1, t2); 3580 } 3581 MACC(a[i], b[0], t0, t1, t2); 3582 m[i] = t0 * inv; 3583 MACC(m[i], n[0], t0, t1, t2); 3584 3585 assert(t0 == 0, "broken Montgomery multiply"); 3586 3587 t0 = t1; t1 = t2; t2 = 0; 3588 } 3589 3590 for (i = len; i < 2*len; i++) { 3591 int j; 3592 for (j = i-len+1; j < len; j++) { 3593 MACC(a[j], b[i-j], t0, t1, t2); 3594 MACC(m[j], n[i-j], t0, t1, t2); 3595 } 3596 m[i-len] = t0; 3597 t0 = t1; t1 = t2; t2 = 0; 3598 } 3599 3600 while (t0) { 3601 t0 = sub(m, n, t0, len); 3602 } 3603 } 3604 3605 // Fast Montgomery squaring. This uses asymptotically 25% fewer 3606 // multiplies so it should be up to 25% faster than Montgomery 3607 // multiplication. However, its loop control is more complex and it 3608 // may actually run slower on some machines. 3609 static void 3610 montgomery_square(unsigned long a[], unsigned long n[], 3611 unsigned long m[], unsigned long inv, int len) { 3612 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 3613 int i; 3614 3615 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 3616 3617 for (i = 0; i < len; i++) { 3618 int j; 3619 int end = (i+1)/2; 3620 for (j = 0; j < end; j++) { 3621 MACC2(a[j], a[i-j], t0, t1, t2); 3622 MACC(m[j], n[i-j], t0, t1, t2); 3623 } 3624 if ((i & 1) == 0) { 3625 MACC(a[j], a[j], t0, t1, t2); 3626 } 3627 for (; j < i; j++) { 3628 MACC(m[j], n[i-j], t0, t1, t2); 3629 } 3630 m[i] = t0 * inv; 3631 MACC(m[i], n[0], t0, t1, t2); 3632 3633 assert(t0 == 0, "broken Montgomery square"); 3634 3635 t0 = t1; t1 = t2; t2 = 0; 3636 } 3637 3638 for (i = len; i < 2*len; i++) { 3639 int start = i-len+1; 3640 int end = start + (len - start)/2; 3641 int j; 3642 for (j = start; j < end; j++) { 3643 MACC2(a[j], a[i-j], t0, t1, t2); 3644 MACC(m[j], n[i-j], t0, t1, t2); 3645 } 3646 if ((i & 1) == 0) { 3647 MACC(a[j], a[j], t0, t1, t2); 3648 } 3649 for (; j < len; j++) { 3650 MACC(m[j], n[i-j], t0, t1, t2); 3651 } 3652 m[i-len] = t0; 3653 t0 = t1; t1 = t2; t2 = 0; 3654 } 3655 3656 while (t0) { 3657 t0 = sub(m, n, t0, len); 3658 } 3659 } 3660 3661 // The threshold at which squaring is advantageous was determined 3662 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz. 3663 // Doesn't seem to be relevant for Power8 so we use the same value. 3664 #define MONTGOMERY_SQUARING_THRESHOLD 64 3665 3666 // Copy len longwords from s to d, word-swapping as we go. The 3667 // destination array is reversed. 3668 static void reverse_words(unsigned long *s, unsigned long *d, int len) { 3669 d += len; 3670 while(len-- > 0) { 3671 d--; 3672 unsigned long s_val = *s; 3673 // Swap words in a longword on little endian machines. 3674 #ifdef VM_LITTLE_ENDIAN 3675 s_val = (s_val << 32) | (s_val >> 32); 3676 #endif 3677 *d = s_val; 3678 s++; 3679 } 3680 } 3681 3682 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints, 3683 jint len, jlong inv, 3684 jint *m_ints) { 3685 len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls. 
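  // The jint inputs are repacked into 64-bit longwords by reverse_words()
  // below (each adjacent pair of jints forms one unsigned long), so the
  // element count must be even.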
3686   assert(len % 2 == 0, "array length in montgomery_multiply must be even");
3687   int longwords = len/2;
3688 
3689   // Make very sure we don't use so much space that the stack might
3690   // overflow. 512 jints correspond to a 16384-bit integer and
3691   // use a total of 8k bytes of stack space here.
3692   int divisor = sizeof(unsigned long) * 4;
3693   guarantee(longwords <= 8192 / divisor, "must be");
3694   int total_allocation = longwords * sizeof (unsigned long) * 4;
3695   unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3696 
3697   // Local scratch arrays
3698   unsigned long
3699     *a = scratch + 0 * longwords,
3700     *b = scratch + 1 * longwords,
3701     *n = scratch + 2 * longwords,
3702     *m = scratch + 3 * longwords;
3703 
3704   reverse_words((unsigned long *)a_ints, a, longwords);
3705   reverse_words((unsigned long *)b_ints, b, longwords);
3706   reverse_words((unsigned long *)n_ints, n, longwords);
3707 
3708   ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
3709 
3710   reverse_words(m, (unsigned long *)m_ints, longwords);
3711 }
3712 
3713 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
3714                                       jint len, jlong inv,
3715                                       jint *m_ints) {
3716   len = len & 0x7fffFFFF; // C2 does not respect int to long conversion for stub calls.
3717   assert(len % 2 == 0, "array length in montgomery_square must be even");
3718   int longwords = len/2;
3719 
3720   // Make very sure we don't use so much space that the stack might
3721   // overflow. 512 jints correspond to a 16384-bit integer and
3722   // use a total of 6k bytes of stack space here.
3723   int divisor = sizeof(unsigned long) * 3;
3724   guarantee(longwords <= (8192 / divisor), "must be");
3725   int total_allocation = longwords * sizeof (unsigned long) * 3;
3726   unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3727 
3728   // Local scratch arrays
3729   unsigned long
3730     *a = scratch + 0 * longwords,
3731     *n = scratch + 1 * longwords,
3732     *m = scratch + 2 * longwords;
3733 
3734   reverse_words((unsigned long *)a_ints, a, longwords);
3735   reverse_words((unsigned long *)n_ints, n, longwords);
3736 
3737   if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3738     ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
3739   } else {
3740     ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
3741   }
3742 
3743   reverse_words(m, (unsigned long *)m_ints, longwords);
3744 }
3745 
3746 #if INCLUDE_JFR
3747 
3748 // For C2: c_rarg0 is junk; call into the runtime to write a checkpoint.
3749 // It returns a jobject handle to the event writer.
3750 // The handle is dereferenced and the return value is the event writer oop.
3751 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3752   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
3753   CodeBuffer code(name, 512, 64);
3754   MacroAssembler* masm = new MacroAssembler(&code);
3755 
3756   Register tmp1 = R10_ARG8;
3757   Register tmp2 = R9_ARG7;
3758 
3759   int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size;
3760   address start = __ pc();
3761   __ mflr(tmp1);
3762   __ std(tmp1, _abi0(lr), R1_SP); // save return pc
3763   __ push_frame_reg_args(0, tmp1);
3764   int frame_complete = __ pc() - start;
3765   __ set_last_Java_frame(R1_SP, noreg);
3766   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), R16_thread);
3767   address calls_return_pc = __ last_calls_return_pc();
3768   __ reset_last_Java_frame();
3769   // The handle is dereferenced through a load barrier.
3770 __ resolve_global_jobject(R3_RET, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE); 3771 __ pop_frame(); 3772 __ ld(tmp1, _abi0(lr), R1_SP); 3773 __ mtlr(tmp1); 3774 __ blr(); 3775 3776 OopMapSet* oop_maps = new OopMapSet(); 3777 OopMap* map = new OopMap(framesize, 0); 3778 oop_maps->add_gc_map(calls_return_pc - start, map); 3779 3780 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) 3781 RuntimeStub::new_runtime_stub(name, &code, frame_complete, 3782 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 3783 oop_maps, false); 3784 return stub; 3785 } 3786 3787 // For c2: call to return a leased buffer. 3788 RuntimeStub* SharedRuntime::generate_jfr_return_lease() { 3789 const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id); 3790 CodeBuffer code(name, 512, 64); 3791 MacroAssembler* masm = new MacroAssembler(&code); 3792 3793 Register tmp1 = R10_ARG8; 3794 Register tmp2 = R9_ARG7; 3795 3796 int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size; 3797 address start = __ pc(); 3798 __ mflr(tmp1); 3799 __ std(tmp1, _abi0(lr), R1_SP); // save return pc 3800 __ push_frame_reg_args(0, tmp1); 3801 int frame_complete = __ pc() - start; 3802 __ set_last_Java_frame(R1_SP, noreg); 3803 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), R16_thread); 3804 address calls_return_pc = __ last_calls_return_pc(); 3805 __ reset_last_Java_frame(); 3806 __ pop_frame(); 3807 __ ld(tmp1, _abi0(lr), R1_SP); 3808 __ mtlr(tmp1); 3809 __ blr(); 3810 3811 OopMapSet* oop_maps = new OopMapSet(); 3812 OopMap* map = new OopMap(framesize, 0); 3813 oop_maps->add_gc_map(calls_return_pc - start, map); 3814 3815 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) 3816 RuntimeStub::new_runtime_stub(name, &code, frame_complete, 3817 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 3818 oop_maps, false); 3819 return stub; 3820 } 3821 3822 #endif // INCLUDE_JFR