/*
 * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2025 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 256*K;

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  // Slow_signature handler that respects the PPC C calling conventions.
  //
  // We get called by the native entry code with our output register
  // area == 8. First we call InterpreterRuntime::get_result_handler
  // to copy the pointer to the signature string temporarily to the
  // first C-argument and to return the result_handler in
  // R3_RET.
  // Since native_entry will copy the jni-pointer to the
  // first C-argument slot later on, it is OK to occupy this slot
  // temporarily. Then we copy the argument list on the java
  // expression stack into native varargs format on the native stack
  // and load arguments into argument registers. Integer arguments in
  // the varargs vector will be sign-extended to 8 bytes.
  //
  // On entry:
  //   R3_ARG1        - intptr_t*            Address of java argument list in memory.
  //   R15_prev_state - BytecodeInterpreter* Address of interpreter state for
  //                                         this method
  //   R19_method
  //
  // On exit (just before return instruction):
  //   R3_RET            - contains the address of the result_handler.
  //   R4_ARG2           - is not updated for static methods and contains "this" otherwise.
  //   R5_ARG3-R10_ARG8: - When the (i-2)th Java argument is not of type float or double,
  //                       ARGi contains this argument. Otherwise, ARGi is not updated.
  //   F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.

  const int LogSizeOfTwoInstructions = 3;

  // FIXME: use Argument:: GL: Argument names different numbers!
  const int max_fp_register_arguments  = 13;
  const int max_int_register_arguments = 6;  // first 2 are reserved

  const Register arg_java       = R21_tmp1;
  const Register arg_c          = R22_tmp2;
  const Register signature      = R23_tmp3;  // is string
  const Register sig_byte       = R24_tmp4;
  const Register fpcnt          = R25_tmp5;
  const Register argcnt         = R26_tmp6;
  const Register intSlot        = R27_tmp7;
  const Register target_sp      = R28_tmp8;
  const FloatRegister floatSlot = F0;

  address entry = __ function_entry();

  __ save_LR(R0);
  __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
  // We use target_sp for storing arguments in the C frame.
  __ mr(target_sp, R1_SP);
  __ push_frame_reg_args_nonvolatiles(0, R11_scratch1);

  __ mr(arg_java, R3_ARG1);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);

  // Signature is in R3_RET. Signature is callee saved.
  __ mr(signature, R3_RET);

  // Get the result handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);

  {
    Label L;
    // test if static
    // _access_flags._flags must be at offset 0.
    // TODO PPC port: requires change in shared code.
    //assert(in_bytes(AccessFlags::flags_offset()) == 0,
    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
    // _access_flags must be a 16 bit value.
    assert(sizeof(AccessFlags) == 2, "wrong size");
    __ lhz(R11_scratch1/*access_flags*/, method_(access_flags));
    // testbit with condition register.
    __ testbitdi(CR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
    __ btrue(CR0, L);
    // For non-static functions, pass "this" in R4_ARG2 and copy it
    // to 2nd C-arg slot.
    // We need to box the Java object here, so we use arg_java
    // (address of current Java stack slot) as argument and don't
    // dereference it as in case of ints, floats, etc.
    __ mr(R4_ARG2, arg_java);
    __ addi(arg_java, arg_java, -BytesPerWord);
    __ std(R4_ARG2, _abi0(carg_2), target_sp);
    __ bind(L);
  }

  // Will be incremented directly after loop_start. argcnt=0
  // corresponds to 3rd C argument.
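  // Illustrative example (not generated code, just a sketch of the parsing loop
  // below): for a signature such as "(ILjava/lang/Object;[DJ)V" the loop visits
  // 'I' (do_int), 'L...;' (do_object, skipping the class name up to ';'),
  // '[D' (do_array, skipping brackets, then do_boxed) and 'J' (do_long);
  // parsing stops at ')' since the return type is never parsed.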
  __ li(argcnt, -1);
  // arg_c points to 3rd C argument
  __ addi(arg_c, target_sp, _abi0(carg_3));
  // no floating-point args parsed so far
  __ li(fpcnt, 0);

  Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
  Label loop_start, loop_end;
  Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;

  // signature points to '(' at entry
#ifdef ASSERT
  __ lbz(sig_byte, 0, signature);
  __ cmplwi(CR0, sig_byte, '(');
  __ bne(CR0, do_dontreachhere);
#endif

  __ bind(loop_start);

  __ addi(argcnt, argcnt, 1);
  __ lbzu(sig_byte, 1, signature);

  __ cmplwi(CR0, sig_byte, ')'); // end of signature
  __ beq(CR0, loop_end);

  __ cmplwi(CR0, sig_byte, 'B'); // byte
  __ beq(CR0, do_int);

  __ cmplwi(CR0, sig_byte, 'C'); // char
  __ beq(CR0, do_int);

  __ cmplwi(CR0, sig_byte, 'D'); // double
  __ beq(CR0, do_double);

  __ cmplwi(CR0, sig_byte, 'F'); // float
  __ beq(CR0, do_float);

  __ cmplwi(CR0, sig_byte, 'I'); // int
  __ beq(CR0, do_int);

  __ cmplwi(CR0, sig_byte, 'J'); // long
  __ beq(CR0, do_long);

  __ cmplwi(CR0, sig_byte, 'S'); // short
  __ beq(CR0, do_int);

  __ cmplwi(CR0, sig_byte, 'Z'); // boolean
  __ beq(CR0, do_int);

  __ cmplwi(CR0, sig_byte, 'L'); // object
  __ beq(CR0, do_object);

  __ cmplwi(CR0, sig_byte, '['); // array
  __ beq(CR0, do_array);

  //  __ cmplwi(CR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
  //  __ beq(CR0, do_void);

  __ bind(do_dontreachhere);

  __ unimplemented("ShouldNotReachHere in slow_signature_handler");

  __ bind(do_array);

  {
    Label start_skip, end_skip;

    __ bind(start_skip);
    __ lbzu(sig_byte, 1, signature);
    __ cmplwi(CR0, sig_byte, '[');
    __ beq(CR0, start_skip); // skip further brackets
    __ cmplwi(CR0, sig_byte, '9');
    __ bgt(CR0, end_skip);   // no optional size
    __ cmplwi(CR0, sig_byte, '0');
    __ bge(CR0, start_skip); // skip optional size
    __ bind(end_skip);

    __ cmplwi(CR0, sig_byte, 'L');
    __ beq(CR0, do_object);  // for arrays of objects, the name of the object must be skipped
    __ b(do_boxed);          // otherwise, go directly to do_boxed
  }

  __ bind(do_object);
  {
    Label L;
    __ bind(L);
    __ lbzu(sig_byte, 1, signature);
    __ cmplwi(CR0, sig_byte, ';');
    __ bne(CR0, L);
  }
  // Need to box the Java object here, so we use arg_java (address of
  // current Java stack slot) as argument and don't dereference it as
  // in case of ints, floats, etc.
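  // Note: a "boxed" reference argument is passed as the address of the Java
  // stack slot holding the oop (an implicit handle), or as nullptr if the slot
  // contains null (see do_null below).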
  Label do_null;
  __ bind(do_boxed);
  __ ld(R0, 0, arg_java);
  __ cmpdi(CR0, R0, 0);
  __ li(intSlot, 0);
  __ beq(CR0, do_null);
  __ mr(intSlot, arg_java);
  __ bind(do_null);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CR0, argcnt, max_int_register_arguments);
  __ blt(CR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_int);
  __ lwa(intSlot, 0, arg_java);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CR0, argcnt, max_int_register_arguments);
  __ blt(CR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_long);
  __ ld(intSlot, -BytesPerWord, arg_java);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CR0, argcnt, max_int_register_arguments);
  __ blt(CR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_float);
  __ lfs(floatSlot, 0, arg_java);
  __ stfs(floatSlot, Argument::float_on_stack_offset_in_bytes_c, arg_c);
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CR0, fpcnt, max_fp_register_arguments);
  __ blt(CR0, move_floatSlot_to_FARG);
  __ b(loop_start);

  __ bind(do_double);
  __ lfd(floatSlot, - BytesPerWord, arg_java);
  __ stfd(floatSlot, 0, arg_c);
  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CR0, fpcnt, max_fp_register_arguments);
  __ blt(CR0, move_floatSlot_to_FARG);
  __ b(loop_start);

  __ bind(loop_end);

  __ pop_frame();
  __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
  __ restore_LR(R0);

  __ blr();

  Label move_int_arg, move_float_arg;
  __ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
  __ mr(R5_ARG3, intSlot);  __ b(loop_start);
  __ mr(R6_ARG4, intSlot);  __ b(loop_start);
  __ mr(R7_ARG5, intSlot);  __ b(loop_start);
  __ mr(R8_ARG6, intSlot);  __ b(loop_start);
  __ mr(R9_ARG7, intSlot);  __ b(loop_start);
  __ mr(R10_ARG8, intSlot); __ b(loop_start);

  __ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
  __ fmr(F1_ARG1, floatSlot);   __ b(loop_start);
  __ fmr(F2_ARG2, floatSlot);   __ b(loop_start);
  __ fmr(F3_ARG3, floatSlot);   __ b(loop_start);
  __ fmr(F4_ARG4, floatSlot);   __ b(loop_start);
  __ fmr(F5_ARG5, floatSlot);   __ b(loop_start);
  __ fmr(F6_ARG6, floatSlot);   __ b(loop_start);
  __ fmr(F7_ARG7, floatSlot);   __ b(loop_start);
  __ fmr(F8_ARG8, floatSlot);   __ b(loop_start);
  __ fmr(F9_ARG9, floatSlot);   __ b(loop_start);
  __ fmr(F10_ARG10, floatSlot); __ b(loop_start);
  __ fmr(F11_ARG11, floatSlot); __ b(loop_start);
  __ fmr(F12_ARG12, floatSlot); __ b(loop_start);
  __ fmr(F13_ARG13, floatSlot); __ b(loop_start);

  __ bind(move_intSlot_to_ARG);
  __ sldi(R0, argcnt, LogSizeOfTwoInstructions);
  __ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
  __ add(R11_scratch1, R0, R11_scratch1);
  __ mtctr(R11_scratch1/*branch_target*/);
  __ bctr();
  __ bind(move_floatSlot_to_FARG);
  __ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
  __ addi(fpcnt, fpcnt, 1);
  __ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
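  // Computed branch, same scheme as for move_int_arg above: the target is
  // move_float_arg + fpcnt * 8, because each dispatch case is exactly two
  // 4-byte instructions (hence LogSizeOfTwoInstructions == 3).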
  __ add(R11_scratch1, R0, R11_scratch1);
  __ mtctr(R11_scratch1/*branch_target*/);
  __ bctr();

  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  //
  // Registers alive
  //   R3_RET
  //   LR
  //
  // Registers updated
  //   R3_RET
  //

  Label done;
  address entry = __ pc();

  switch (type) {
  case T_BOOLEAN:
    // convert !=0 to 1
    __ normalize_bool(R3_RET);
    break;
  case T_BYTE:
    // sign extend 8 bits
    __ extsb(R3_RET, R3_RET);
    break;
  case T_CHAR:
    // zero extend 16 bits
    __ clrldi(R3_RET, R3_RET, 48);
    break;
  case T_SHORT:
    // sign extend 16 bits
    __ extsh(R3_RET, R3_RET);
    break;
  case T_INT:
    // sign extend 32 bits
    __ extsw(R3_RET, R3_RET);
    break;
  case T_LONG:
    break;
  case T_OBJECT:
    // JNIHandles::resolve result.
    __ resolve_jobject(R3_RET, R11_scratch1, R31, MacroAssembler::PRESERVATION_FRAME_LR); // kills R31
    break;
  case T_FLOAT:
    break;
  case T_DOUBLE:
    break;
  case T_VOID:
    break;
  default: ShouldNotReachHere();
  }

  BIND(done);
  __ blr();

  return entry;
}

// Abstract method entry.
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();

  //
  // Registers alive
  //   R16_thread     - JavaThread*
  //   R19_method     - callee's method (method to be invoked)
  //   R1_SP          - SP prepared such that caller's outgoing args are near top
  //   LR             - return address to caller
  //
  // Stack layout at this point:
  //
  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
  //           alignment (optional)
  //           [outgoing Java arguments]
  //           ...
  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
  //           ...
  //

  // Can't use call_VM here because we have not set up a new
  // interpreter state. Make the call to the vm and make it look like
  // our caller set up the JavaFrameAnchor.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Push a new C frame and save LR.
  __ save_LR(R0);
  __ push_frame_reg_args(0, R11_scratch1);

  // This is not a leaf but we have a JavaFrameAnchor now and we will
  // check (create) exceptions afterward so this is ok.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
                  R16_thread, R19_method);

  // Pop the C frame and restore LR.
  __ pop_frame();
  __ restore_LR(R0);

  // Reset JavaFrameAnchor from call_VM_leaf above.
  __ reset_last_Java_frame();

  // We don't know our caller, so jump to the general forward exception stub,
  // which will also pop our full frame off. Satisfy the interface of
  // SharedRuntime::generate_forward_exception()
  __ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}

// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
//    into R8 and return quickly
// 2. If G1 is active we *must* execute this intrinsic for correctness:
//    It contains a GC barrier which puts the reference into the satb buffer
//    to indicate that someone holds a strong reference to the object the
//    weak ref points to!
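// Roughly, the generated stub behaves like the following Java-level sketch
// (illustrative only, not generated code):
//   T get() { T r = this.referent; /* SATB pre-barrier logs r */ return r; }
// with a null receiver falling back to the regular method entry (slow path).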
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. the "intrinsified" code for G1 (or any SATB based GC),
  //    2. the slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  Label slow_path;

  // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);

  // In the G1 code we don't check if we need to reach a safepoint. We
  // continue and the thread will safepoint at the next bytecode dispatch.

  // If the receiver is null then it is OK to jump to the slow path.
  __ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver

  // Check if receiver == nullptr and go the slow path.
  __ cmpdi(CR0, R3_RET, 0);
  __ beq(CR0, slow_path);

  __ load_heap_oop(R3_RET, referent_offset, R3_RET,
                   /* non-volatile temp */ R31, R11_scratch1,
                   MacroAssembler::PRESERVATION_FRAME_LR,
                   ON_WEAK_OOP_REF);

  // Generate the G1 pre-barrier code to log the value of
  // the referent field in an SATB buffer. Note with
  // these parameters the pre-barrier does not generate
  // the load of the previous value.

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);

  __ blr();

  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
  return entry;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();
  // Throw exception.
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  __ empty_expression_stack();
  // R4_ARG2 already contains the array.
  // Index is in R17_tos.
  __ mr(R5_ARG3, R17_tos);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R4_ARG2, R5_ARG3);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Load exception object.
  // Thread will be loaded to R3_ARG1.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
#ifdef ASSERT
  // Above call must not return here since exception pending.
  __ should_not_reach_here();
#endif
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  address entry = __ pc();
  //__ untested("generate_exception_handler_common");
  Register Rexception = R17_tos;

  // Expression stack must be empty before entering the VM if an exception happened.
  __ empty_expression_stack();

  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
  if (pass_oop) {
    __ mr(R5_ARG3, Rexception);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception));
  } else {
    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception));
  }

  // Throw exception.
  __ mr(R3_ARG1, Rexception);
  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}

// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Move the value out of the return register back to the TOS cache of current frame.
  switch (state) {
    case ltos:
    case btos:
    case ztos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
    case ftos:
    case dtos: __ fmr(F15_ftos, F1_RET); break; // F1_RET -> TOS cache
    case vtos: break;                           // Nothing to do, this was a void return.
    default  : ShouldNotReachHere();
  }

  __ restore_interpreter_state(R11_scratch1, false /*bcp_and_mdx_only*/, true /*restore_top_frame_sp*/);

  // Compiled code destroys templateTableBase, reload.
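  // (R25_templateTableBase caches the base address of the interpreter's bytecode
  // dispatch tables; compiled code is assumed not to preserve it, so it is
  // re-materialized here before dispatch_next below.)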
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);

  if (state == atos) {
    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
  }

  const Register cache = R11_scratch1;
  const Register size  = R12_scratch2;
  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, size /* tmp */);
    __ lhz(size, in_bytes(ResolvedIndyEntry::num_parameters_offset()), cache);
  } else {
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, size /* tmp */);
    __ lhz(size, in_bytes(ResolvedMethodEntry::num_parameters_offset()), cache);
  }
  __ sldi(size, size, Interpreter::logStackElementSize);
  __ add(R15_esp, R15_esp, size);

  __ check_and_handle_popframe(R11_scratch1);
  __ check_and_handle_earlyret(R11_scratch1);

  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();
  // If state != vtos, we're returning from a native method, which put its result
  // into the result register. So move the value out of the return register back
  // to the TOS cache of current frame.

  switch (state) {
    case ltos:
    case btos:
    case ztos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R17_tos, R3_RET); break;   // GR_RET -> TOS cache
    case ftos:
    case dtos: __ fmr(F15_ftos, F1_RET); break; // F1_RET -> TOS cache
    case vtos: break;                           // Nothing to do, this was a void return.
    default  : ShouldNotReachHere();
  }

  // Load LcpoolCache @@@ should be already set!
  __ get_constant_pool_cache(R27_constPoolCache);

  // Handle a pending exception, fall through if none.
  __ check_and_forward_exception(R11_scratch1, R12_scratch2);

  // Start executing bytecodes.
  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation, R11_scratch1);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ push_cont_fastpath();
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath();
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));

  return entry;
}

address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
  if (!Continuations::enabled()) return nullptr;
  address start = __ pc();

  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
  __ restore_interpreter_state(R11_scratch1, false, true /*restore_top_frame_sp*/);
  __ blr();

  return start;
}

// Helpers for commoning out cases in the various type of method entries.

// Increment invocation count & check for overflow.
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  // Note: In tiered we increment either counters in method or in MDO depending if we're profiling or not.
  Register Rscratch1   = R11_scratch1;
  Register Rscratch2   = R12_scratch2;
  Register R3_counters = R3_ARG1;
  Label done;

  const int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    const Register Rmdo = R3_counters;
    __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
    __ cmpdi(CR0, Rmdo, 0);
    __ beq(CR0, no_mdo);

    // Increment invocation counter in the MDO.
    const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
    __ lwz(Rscratch2, mdo_ic_offs, Rmdo);
    __ lwz(Rscratch1, in_bytes(MethodData::invoke_mask_offset()), Rmdo);
    __ addi(Rscratch2, Rscratch2, increment);
    __ stw(Rscratch2, mdo_ic_offs, Rmdo);
    __ and_(Rscratch1, Rscratch2, Rscratch1);
    __ bne(CR0, done);
    __ b(*overflow);
  }

  // Increment counter in MethodCounters*.
  const int mo_ic_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
  __ bind(no_mdo);
  __ get_method_counters(R19_method, R3_counters, done);
  __ lwz(Rscratch2, mo_ic_offs, R3_counters);
  __ lwz(Rscratch1, in_bytes(MethodCounters::invoke_mask_offset()), R3_counters);
  __ addi(Rscratch2, Rscratch2, increment);
  __ stw(Rscratch2, mo_ic_offs, R3_counters);
  __ and_(Rscratch1, Rscratch2, Rscratch1);
  __ beq(CR0, *overflow);

  __ bind(done);
}

// Generate code to initiate compilation on invocation counter overflow.
void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes one argument,
  // which indicates if the counter overflow occurs at a backwards branch (null bcp).
  // We pass zero in.
  // The call returns the address of the verified entry point for the method or null
  // if the compilation did not complete (either went background or bailed out).
  //
  // Unlike the C++ interpreter above: Check exceptions!
  // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.

  __ li(R4_ARG2, 0);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

  // Returns verified_entry_point or null.
  // We ignore it in any case.
  __ b(continue_entry);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
//
// Kills Rmem_frame_size, Rscratch1.
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
  Label done;
  assert_different_registers(Rmem_frame_size, Rscratch1);

  BLOCK_COMMENT("stack_overflow_check_with_compare {");
  __ sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
  __ ld(Rscratch1, thread_(stack_overflow_limit));
  __ cmpld(CR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
  __ bgt(CR0/*is_stack_overflow*/, done);

  // The stack overflows. Load target address of the runtime stub and call it.
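  // (The ASSERT block below verifies that the interpreted caller frame was not
  // shrunk: R1_SP must not lie above R21_sender_SP and both must hold the same
  // back link before the caller's SP is restored for the throw stub.)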
  assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "generated in wrong order");
  __ load_const_optimized(Rscratch1, (SharedRuntime::throw_StackOverflowError_entry()), R0);
  __ mtctr(Rscratch1);
  // Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame).
#ifdef ASSERT
  Label frame_not_shrunk;
  __ cmpld(CR0, R1_SP, R21_sender_SP);
  __ ble(CR0, frame_not_shrunk);
  __ stop("frame shrunk");
  __ bind(frame_not_shrunk);
  __ ld(Rscratch1, 0, R1_SP);
  __ ld(R0, 0, R21_sender_SP);
  __ cmpd(CR0, R0, Rscratch1);
  __ asm_assert_eq("backlink");
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP);
  __ bctr();

  __ align(32, 12);
  __ bind(done);
  BLOCK_COMMENT("} stack_overflow_check_with_compare");
}

// Lock the current method, interpreter register window must be set up!
void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
  const Register Robj_to_lock = Rscratch2;

  {
    if (!flags_preloaded) {
      __ lhz(Rflags, method_(access_flags));
    }

#ifdef ASSERT
    // Check if the method needs synchronization.
    {
      Label Lok;
      __ testbitdi(CR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
      __ btrue(CR0, Lok);
      __ stop("method doesn't need synchronization");
      __ bind(Lok);
    }
#endif // ASSERT
  }

  // Get synchronization object to Rscratch2.
  {
    Label Lstatic;
    Label Ldone;

    __ testbitdi(CR0, R0, Rflags, JVM_ACC_STATIC_BIT);
    __ btrue(CR0, Lstatic);

    // Non-static case: load receiver obj from stack and we're done.
    __ ld(Robj_to_lock, R18_locals);
    __ b(Ldone);

    __ bind(Lstatic); // Static case: Lock the java mirror
    // Load mirror from interpreter frame.
    __ ld(Robj_to_lock, _abi0(callers_sp), R1_SP);
    __ ld(Robj_to_lock, _ijava_state_neg(mirror), Robj_to_lock);

    __ bind(Ldone);
    __ verify_oop(Robj_to_lock);
  }

  // Got the oop to lock => execute!
  __ add_monitor_to_stack(true, Rscratch1, R0);

  __ std(Robj_to_lock, in_bytes(BasicObjectLock::obj_offset()), R26_monitor);
  __ lock_object(R26_monitor, Robj_to_lock);
}

// Generate a fixed interpreter frame for pure interpreter
// and I2N native transition frames.
//
// Before (stack grows downwards):
//
//         |  ...         |
//         |--------------|
//         |  java arg0   |
//         |  ...         |
//         |  java argn   |
//         |              |   <-   R15_esp
//         |              |
//         |--------------|
//         | abi_112      |
//         |              |   <-   R1_SP
//         |==============|
//
//
// After:
//
//         |  ...         |
//         |  java arg0   |   <-   R18_locals
//         |  ...         |
//         |  java argn   |
//         |--------------|
//         |              |
//         |  java locals |
//         |              |
//         |--------------|
//         |  abi_48      |
//         |==============|
//         |              |
//         |   istate     |
//         |              |
//         |--------------|
//         |   monitor    |   <-   R26_monitor
//         |--------------|
//         |              |   <-   R15_esp
//         | expression   |
//         | stack        |
//         |              |
//         |--------------|
//         |              |
//         | abi_112      |   <-   R1_SP
//         |==============|
//
// The top most frame needs an abi space of 112 bytes. This space is needed,
// since we call to c. The c function may spill its arguments to the caller
// frame. When we call to java, we don't need these spill slots. In order to save
// space on the stack, we resize the caller.
// However, java locals reside in
// the caller frame and the frame has to be increased. The frame_size for the
// current frame was calculated based on max_stack as size for the expression
// stack. At the call, just a part of the expression stack might be used.
// We don't want to waste this space and cut the frame back accordingly.
// The resulting amount for resizing is calculated as follows:
// resize =   (number_of_locals - number_of_arguments) * slot_size
//          + (R1_SP - R15_esp) + 48
//
// The size for the callee frame is calculated:
// framesize = 112 + max_stack + monitor + state_size
//
// maxstack:   Max number of slots on the expression stack, loaded from the method.
// monitor:    We statically reserve room for one monitor object.
// state_size: We save the current state of the interpreter to this area.
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
  Register Rparent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
           Rtop_frame_size      = R7_ARG5,
           Rconst_method        = R8_ARG6,
           Rconst_pool          = R9_ARG7,
           Rmirror              = R10_ARG8;

  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, Rparent_frame_resize, Rtop_frame_size,
                             Rconst_method, Rconst_pool);

  __ ld(Rconst_method, method_(const));
  __ lhz(Rsize_of_parameters /* number of params */,
         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
  if (native_call) {
    // If we're calling a native method, we reserve space for the worst-case signature
    // handler varargs vector, which is max(Argument::n_int_register_parameters_c, parameter_count+2).
    // We add two slots to the parameter_count, one for the jni
    // environment and one for a possible native mirror.
    Label skip_native_calculate_max_stack;
    __ addi(Rtop_frame_size, Rsize_of_parameters, 2);
    __ cmpwi(CR0, Rtop_frame_size, Argument::n_int_register_parameters_c);
    __ bge(CR0, skip_native_calculate_max_stack);
    __ li(Rtop_frame_size, Argument::n_int_register_parameters_c);
    __ bind(skip_native_calculate_max_stack);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(Rtop_frame_size, Rtop_frame_size, Interpreter::logStackElementSize);
    __ sub(Rparent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
  } else {
    __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
    __ lhz(Rtop_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
    __ sub(Rparent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    __ sldi(Rtop_frame_size, Rtop_frame_size, Interpreter::logStackElementSize);
    __ add(Rparent_frame_resize, Rparent_frame_resize, R11_scratch1);
  }

  // Compute top frame size.
  __ addi(Rtop_frame_size, Rtop_frame_size, frame::top_ijava_frame_abi_size + frame::ijava_state_size);

  // Cut back area between esp and max_stack.
  __ addi(Rparent_frame_resize, Rparent_frame_resize, frame::parent_ijava_frame_abi_size - Interpreter::stackElementSize);

  __ round_to(Rtop_frame_size, frame::alignment_in_bytes);
  __ round_to(Rparent_frame_resize, frame::alignment_in_bytes);
  // Rparent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.

  if (!native_call) {
    // Stack overflow check.
    // Native calls don't need the stack size check since they have no
    // expression stack and the arguments are already on the stack and
    // we only add a handful of words to the stack.
    __ add(R11_scratch1, Rparent_frame_resize, Rtop_frame_size);
    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
  }

  // Set up interpreter state registers.

  __ add(R18_locals, R15_esp, Rsize_of_parameters);
  __ ld(Rconst_pool, in_bytes(ConstMethod::constants_offset()), Rconst_method);
  __ ld(R27_constPoolCache, ConstantPool::cache_offset(), Rconst_pool);

  // Set method data pointer.
  if (ProfileInterpreter) {
    Label zero_continue;
    __ ld(R28_mdx, method_(method_data));
    __ cmpdi(CR0, R28_mdx, 0);
    __ beq(CR0, zero_continue);
    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
    __ bind(zero_continue);
  }

  if (native_call) {
    __ li(R14_bcp, 0); // Must initialize.
  } else {
    __ addi(R14_bcp, Rconst_method, in_bytes(ConstMethod::codes_offset()));
  }

  // Resize parent frame.
  __ mflr(R12_scratch2);
  __ neg(Rparent_frame_resize, Rparent_frame_resize);
  __ resize_frame(Rparent_frame_resize, R11_scratch1);
  __ std(R12_scratch2, _abi0(lr), R1_SP);

  // Get mirror and store it in the frame as GC root for this Method*.
  __ ld(Rmirror, ConstantPool::pool_holder_offset(), Rconst_pool);
  __ ld(Rmirror, in_bytes(Klass::java_mirror_offset()), Rmirror);
  __ resolve_oop_handle(Rmirror, R11_scratch1, R12_scratch2, MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS);

  __ addi(R26_monitor, R1_SP, -frame::ijava_state_size);
  __ addi(R15_esp, R26_monitor, -Interpreter::stackElementSize);

  // Store values.
  __ std(R19_method, _ijava_state_neg(method), R1_SP);
  __ std(Rmirror, _ijava_state_neg(mirror), R1_SP);
  __ sub(R12_scratch2, R18_locals, R1_SP);
  __ srdi(R12_scratch2, R12_scratch2, Interpreter::logStackElementSize);
  // Store relativized R18_locals, see frame::interpreter_frame_locals().
  __ std(R12_scratch2, _ijava_state_neg(locals), R1_SP);
  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);

  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
  // be found in the frame after save_interpreter_state is done. This is always true
  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
  // (Enhanced Stack Trace).
  // The signal handler does not save the interpreter state into the frame.

  // We have to initialize some of these frame slots for native calls (accessed by GC).
  // Also initialize them for non-native calls for better tool support (even though
  // you may not get the most recent version as described above).
  __ li(R0, 0);
  __ li(R12_scratch2, -(frame::ijava_state_size / wordSize));
  __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
  __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
  if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
  __ sub(R12_scratch2, R15_esp, R1_SP);
  __ sradi(R12_scratch2, R12_scratch2, Interpreter::logStackElementSize);
  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP); // only used for native_call

  // Store sender's SP and this frame's top SP.
  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
  __ neg(R12_scratch2, Rtop_frame_size);
  __ sradi(R12_scratch2, R12_scratch2, Interpreter::logStackElementSize);
  // Store relativized top_frame_sp
  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);

  // Push top frame.
  __ push_frame(Rtop_frame_size, R11_scratch1);
}

// End of helpers

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

  // Decide what to do: Use same platform specific instructions and runtime calls as compilers.
  bool use_instruction = false;
  address runtime_entry = nullptr;
  int num_args = 1;
  bool double_precision = true;

  // PPC64 specific:
  switch (kind) {
    case Interpreter::java_lang_math_sqrt: use_instruction = VM_Version::has_fsqrt(); break;
    case Interpreter::java_lang_math_abs:  use_instruction = true; break;
    case Interpreter::java_lang_math_fmaF:
    case Interpreter::java_lang_math_fmaD: use_instruction = UseFMA; break;
    default: break; // Fall back to runtime call.
  }

  switch (kind) {
    case Interpreter::java_lang_math_sin  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);   break;
    case Interpreter::java_lang_math_cos  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);   break;
    case Interpreter::java_lang_math_tan  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);   break;
    case Interpreter::java_lang_math_tanh : /* run interpreted */ break;
    case Interpreter::java_lang_math_abs  : /* run interpreted */ break;
    case Interpreter::java_lang_math_sqrt : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);  break;
    case Interpreter::java_lang_math_log  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);   break;
    case Interpreter::java_lang_math_log10: runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10); break;
    case Interpreter::java_lang_math_pow  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow); num_args = 2; break;
    case Interpreter::java_lang_math_exp  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);   break;
    case Interpreter::java_lang_math_fmaF : /* run interpreted */ num_args = 3; double_precision = false; break;
    case Interpreter::java_lang_math_fmaD : /* run interpreted */ num_args = 3; break;
    default: ShouldNotReachHere();
  }

  // Use normal entry if neither instruction nor runtime call is used.
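  // (A nullptr return from this generator means no specialized math entry is
  // emitted and the regular interpreted entry is used for the method, as the
  // comment above notes.)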
  if (!use_instruction && runtime_entry == nullptr) return nullptr;

  address entry = __ pc();

  // Load arguments
  assert(num_args <= 13, "passed in registers");
  if (double_precision) {
    int offset = (2 * num_args - 1) * Interpreter::stackElementSize;
    for (int i = 0; i < num_args; ++i) {
      __ lfd(as_FloatRegister(F1_ARG1->encoding() + i), offset, R15_esp);
      offset -= 2 * Interpreter::stackElementSize;
    }
  } else {
    int offset = num_args * Interpreter::stackElementSize;
    for (int i = 0; i < num_args; ++i) {
      __ lfs(as_FloatRegister(F1_ARG1->encoding() + i), offset, R15_esp);
      offset -= Interpreter::stackElementSize;
    }
  }

  if (use_instruction) {
    switch (kind) {
      case Interpreter::java_lang_math_sqrt: __ fsqrt(F1_RET, F1);          break;
      case Interpreter::java_lang_math_abs:  __ fabs(F1_RET, F1);           break;
      case Interpreter::java_lang_math_fmaF: __ fmadds(F1_RET, F1, F2, F3); break;
      case Interpreter::java_lang_math_fmaD: __ fmadd(F1_RET, F1, F2, F3);  break;
      default: ShouldNotReachHere();
    }
  } else {
    // Comment: Can use tail call if the unextended frame is always C ABI compliant:
    //__ load_const_optimized(R12_scratch2, runtime_entry, R0);
    //__ call_c_and_return_to_caller(R12_scratch2);

    // Push a new C frame and save LR.
    __ save_LR(R0);
    __ push_frame_reg_args(0, R11_scratch1);

    __ call_VM_leaf(runtime_entry);

    // Pop the C frame and restore LR.
    __ pop_frame();
    __ restore_LR(R0);
  }

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
  __ blr();

  __ flush();

  return entry;
}

address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() {
  if (!VM_Version::supports_float16()) return nullptr;

  address entry = __ pc();

  __ lfs(F1, Interpreter::stackElementSize, R15_esp);
  __ f2hf(R3_RET, F1, F0);

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
  __ blr();

  __ flush();

  return entry;
}

address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() {
  if (!VM_Version::supports_float16()) return nullptr;

  address entry = __ pc();

  // Note: Could also use:
  //__ li(R3, Interpreter::stackElementSize);
  //__ lfiwax(F1_RET, R15_esp, R3); // short stored as 32 bit integer
  //__ xscvhpdp(F1_RET->to_vsr(), F1_RET->to_vsr());
  __ lwa(R3, Interpreter::stackElementSize, R15_esp);
  __ hf2f(F1_RET, R3);

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
  __ blr();

  __ flush();

  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is set up, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  const size_t page_size = os::vm_page_size();
  const int n_shadow_pages = StackOverflow::stack_shadow_zone_size() / page_size;
  const int start_page = native_call ? n_shadow_pages : 1;
  BLOCK_COMMENT("bang_stack_shadow_pages:");
  for (int pages = start_page; pages <= n_shadow_pages; pages++) {
    __ bang_stack_with_offset(pages*page_size);
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
//
// On entry:
//   R19_method    - method
//   R16_thread    - JavaThread*
//   R15_esp       - intptr_t* sender tos
//
//   abstract stack (grows up)
//     [  IJava (caller of JNI callee)  ]  <-- ASP
//        ...
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

  address entry = __ pc();

  const bool inc_counter = UseCompiler || CountCompiledCalls;

  // -----------------------------------------------------------------------------
  // Allocate a new frame that represents the native callee (i2n frame).
  // This is not a full-blown interpreter frame, but in particular, the
  // following registers are valid after this:
  // - R19_method
  // - R18_local (points to start of arguments to native function)
  //
  //   abstract stack (grows up)
  //     [  IJava (caller of JNI callee)  ]  <-- ASP
  //        ...

  const Register signature_handler_fd = R11_scratch1;
  const Register pending_exception    = R0;
  const Register result_handler_addr  = R31;
  const Register native_method_fd     = R12_scratch2; // preferred in MacroAssembler::branch_to
  const Register access_flags         = R22_tmp2;
  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
  const Register sync_state           = R12_scratch2;
  const Register sync_state_addr      = sync_state;   // Address is dead after use.
  const Register suspend_flags        = R11_scratch1;

  //=============================================================================
  // Allocate new frame and initialize interpreter state.

  Label exception_return;
  Label exception_return_sync_check;
  Label stack_overflow_return;

  Register size_of_parameters = R22_tmp2;

  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);

  //=============================================================================
  // Increment invocation counter. On overflow, entry to JNI method
  // will be compiled.
  Label invocation_counter_overflow, continue_after_compile;
  if (inc_counter) {
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of synchronized methods which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // runtime, exception handling i.e. unlock_if_synchronized_method will
      // check this thread local flag.
      // This flag has two effects, one is to force an unwind in the topmost
      // interpreter frame and not perform an unlock while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
    generate_counter_incr(&invocation_counter_overflow);

    BIND(continue_after_compile);
  }

  bang_stack_shadow_pages(true);

  if (inc_counter) {
    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // access_flags = method->access_flags();
  // Load access flags.
  assert(__ nonvolatile_accross_vthread_preemtion(access_flags),
         "access_flags not preserved");
  // Type check.
  assert(2 == sizeof(AccessFlags), "unexpected field size");
  __ lhz(access_flags, method_(access_flags));

  // We don't want to reload R19_method and access_flags after calls
  // to some helper functions.
  assert(R19_method->is_nonvolatile(),
         "R19_method must be a non-volatile register");

  // Check for synchronized methods. Must happen AFTER invocation counter
  // check, so method is not locked if counter overflows.

  if (synchronized) {
    lock_method(access_flags, R11_scratch1, R12_scratch2, true);

    // Update monitor in state.
    __ ld(R11_scratch1, 0, R1_SP);
    __ sub(R12_scratch2, R26_monitor, R11_scratch1);
    __ sradi(R12_scratch2, R12_scratch2, Interpreter::logStackElementSize);
    __ std(R12_scratch2, _ijava_state_neg(monitors), R11_scratch1);
  }

  // jvmti/jvmpi support
  __ notify_method_entry();

  //=============================================================================
  // Get and call the signature handler.

  __ ld(signature_handler_fd, method_(signature_handler));
  Label call_signature_handler;

  __ cmpdi(CR0, signature_handler_fd, 0);
  __ bne(CR0, call_signature_handler);

  // Method has never been called. Either generate a specialized
  // handler or point to the slow one.
  //
  // Pass parameter 'false' to avoid exception check in call_VM.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);

  // Check for an exception while looking up the target method. If we
  // incurred one, bail.
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CR0, pending_exception, 0);
  __ bne(CR0, exception_return_sync_check); // Has pending exception.

  // Reload signature handler, it may have been created/assigned in the meanwhile.
  __ ld(signature_handler_fd, method_(signature_handler));
  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).

  BIND(call_signature_handler);

  // Before we call the signature handler we push a new frame to
  // protect the interpreter frame volatile registers when we return
  // from jni but before we can get back to Java.

  // First set the frame anchor while the SP/FP registers are
  // convenient and the slow signature handler can use this same frame
  // anchor.

  bool support_vthread_preemption = Continuations::enabled() && LockingMode != LM_LEGACY;

  // We have a TOP_IJAVA_FRAME here, which belongs to us.
  Label last_java_pc;
  Label *resume_pc = support_vthread_preemption ?
                     &last_java_pc : nullptr;
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R3_ARG1/*tmp*/, resume_pc);

  // Now the interpreter frame (and its call chain) have been
  // invalidated and flushed. We are now protected against eager
  // being enabled in native code. Even if it goes eager the
  // registers will be reloaded as clean and we will invalidate after
  // the call so no spurious flush should be possible.

  // Call signature handler and pass locals address.
  //
  // Our signature handlers copy required arguments to the C stack
  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
  __ mr(R3_ARG1, R18_locals);
#if !defined(ABI_ELFv2)
  __ ld(signature_handler_fd, 0, signature_handler_fd);
#endif

  __ call_stub(signature_handler_fd);

  assert(__ nonvolatile_accross_vthread_preemtion(result_handler_addr),
         "result_handler_addr not preserved");
  // Save across call to native method.
  __ mr(result_handler_addr, R3_RET);
  __ ld(R11_scratch1, _abi0(callers_sp), R1_SP); // load FP

  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.

  // Set up fixed parameters and call the native method.
  // If the method is static, get mirror into R4_ARG2.
  {
    Label method_is_not_static;
    // Access_flags is non-volatile and still valid, no need to restore it.

    // Restore access flags.
    __ testbitdi(CR0, R0, access_flags, JVM_ACC_STATIC_BIT);
    __ bfalse(CR0, method_is_not_static);

    // Load mirror from interpreter frame (FP in R11_scratch1)
    __ ld(R21_tmp1, _ijava_state_neg(mirror), R11_scratch1);
    // R4_ARG2 = &state->_oop_temp;
    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
    __ std(R21_tmp1/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
    BIND(method_is_not_static);
  }

  // At this point, arguments have been copied off the stack into
  // their JNI positions. Oops are boxed in-place on the stack, with
  // handles copied to arguments. The result handler address is in a
  // register.

  // Pass JNIEnv address as first parameter.
  __ addir(R3_ARG1, thread_(jni_environment));

  // Load the native_method entry before we change the thread state.
  __ ld(native_method_fd, method_(native_function));

  //=============================================================================
  // Transition from _thread_in_Java to _thread_in_native. As soon as
  // we make this change the safepoint code needs to be certain that
  // the last Java frame we established is good. The pc in that frame
  // just needs to be near here not an actual return address.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0, _thread_in_native);
  __ release();

  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));

  //=============================================================================
  // Call the native method. Argument registers must not have been
  // overwritten since "__ call_stub(signature_handler);" (except for
  // ARG1 and ARG2 for static methods).
1442 1443 if (support_vthread_preemption) { 1444 // result_handler_addr is a nonvolatile register. Its value will be preserved across 1445 // the native call but only if the call isn't preempted. To preserve its value even 1446 // in the case of preemption we save it in the lresult slot. It is restored at 1447 // resume_pc if, and only if the call was preempted. This works because only 1448 // j.l.Object::wait calls are preempted which don't return a result. 1449 __ std(result_handler_addr, _ijava_state_neg(lresult), R11_scratch1); 1450 } 1451 __ push_cont_fastpath(); 1452 __ call_c(native_method_fd); 1453 __ pop_cont_fastpath(); 1454 1455 __ li(R0, 0); 1456 __ ld(R11_scratch1, 0, R1_SP); 1457 __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1); 1458 __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1); 1459 __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset 1460 1461 // Note: C++ interpreter needs the following here: 1462 // The frame_manager_lr field, which we use for setting the last 1463 // java frame, gets overwritten by the signature handler. Restore 1464 // it now. 1465 //__ get_PC_trash_LR(R11_scratch1); 1466 //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP); 1467 1468 // Because of GC R19_method may no longer be valid. 1469 1470 // Block, if necessary, before resuming in _thread_in_Java state. 1471 // In order for GC to work, don't clear the last_Java_sp until after 1472 // blocking. 1473 1474 //============================================================================= 1475 // Switch thread to "native transition" state before reading the 1476 // synchronization state. This additional state is necessary 1477 // because reading and testing the synchronization state is not 1478 // atomic w.r.t. GC, as this scenario demonstrates: Java thread A, 1479 // in _thread_in_native state, loads _not_synchronized and is 1480 // preempted. VM thread changes sync state to synchronizing and 1481 // suspends threads for GC. Thread A is resumed to finish this 1482 // native method, but doesn't block here since it didn't see any 1483 // synchronization in progress, and escapes. 1484 1485 // We use release_store_fence to update values like the thread state, where 1486 // we don't want the current thread to continue until all our prior memory 1487 // accesses (including the new thread state) are visible to other threads. 1488 __ li(R0/*thread_state*/, _thread_in_native_trans); 1489 __ release(); 1490 __ stw(R0/*thread_state*/, thread_(thread_state)); 1491 if (!UseSystemMemoryBarrier) { 1492 __ fence(); 1493 } 1494 1495 // Now before we return to java we must look for a current safepoint 1496 // (a new safepoint can not start since we entered native_trans). 1497 // We must check here because a current safepoint could be modifying 1498 // the callers registers right this moment. 1499 1500 // Acquire isn't strictly necessary here because of the fence, but 1501 // sync_state is declared to be volatile, so we do it anyway 1502 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path). 1503 1504 Label do_safepoint, sync_check_done; 1505 // No synchronization in progress nor yet synchronized. 1506 __ safepoint_poll(do_safepoint, sync_state, true /* at_return */, false /* in_nmethod */); 1507 1508 // Not suspended. 
1509 // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size"); 1510 __ lwz(suspend_flags, thread_(suspend_flags)); 1511 __ cmpwi(CR1, suspend_flags, 0); 1512 __ beq(CR1, sync_check_done); 1513 1514 __ bind(do_safepoint); 1515 __ isync(); 1516 // Block. We do the call directly and leave the current 1517 // last_Java_frame setup undisturbed. We must save any possible 1518 // native result across the call. No oop is present. 1519 1520 __ mr(R3_ARG1, R16_thread); 1521 __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)); 1522 1523 __ bind(sync_check_done); 1524 1525 //============================================================================= 1526 // <<<<<< Back in Interpreter Frame >>>>> 1527 1528 // We are in thread_in_native_trans here and back in the normal 1529 // interpreter frame. We don't have to do anything special about 1530 // safepoints and we can switch to Java mode anytime we are ready. 1531 1532 // Note: frame::interpreter_frame_result has a dependency on how the 1533 // method result is saved across the call to post_method_exit. For 1534 // native methods it assumes that the non-FPU/non-void result is 1535 // saved in _native_lresult and a FPU result in _native_fresult. If 1536 // this changes then the interpreter_frame_result implementation 1537 // will need to be updated too. 1538 1539 // On PPC64, we have stored the result directly after the native call. 1540 1541 //============================================================================= 1542 // Back in Java 1543 1544 // We use release_store_fence to update values like the thread state, where 1545 // we don't want the current thread to continue until all our prior memory 1546 // accesses (including the new thread state) are visible to other threads. 1547 __ li(R0/*thread_state*/, _thread_in_Java); 1548 __ lwsync(); // Acquire safepoint and suspend state, release thread state. 1549 __ stw(R0/*thread_state*/, thread_(thread_state)); 1550 1551 if (support_vthread_preemption) { 1552 // Check preemption for Object.wait() 1553 Label not_preempted; 1554 __ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); 1555 __ cmpdi(CR0, R0, 0); 1556 __ beq(CR0, not_preempted); 1557 __ mtlr(R0); 1558 __ li(R0, 0); 1559 __ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread); 1560 __ blr(); 1561 1562 // Execution will be resumed here when the vthread becomes runnable again. 1563 __ bind(*resume_pc); 1564 __ restore_after_resume(R11_scratch1 /* fp */); 1565 // We saved the result handler before the call 1566 __ ld(result_handler_addr, _ijava_state_neg(lresult), R11_scratch1); 1567 #ifdef ASSERT 1568 // Clobber result slots. Only native methods returning void can be preemted currently. 
1569 __ load_const(R3_RET, UCONST64(0xbad01001)); 1570 __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1); 1571 __ std(R3_RET, _ijava_state_neg(fresult), R11_scratch1); 1572 // reset_last_Java_frame() below asserts that a last java sp is set 1573 __ asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_sp_offset()), 1574 R16_thread, FILE_AND_LINE ": Last java sp should not be set when resuming"); 1575 __ std(R3_RET, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread); 1576 #endif 1577 __ bind(not_preempted); 1578 } 1579 1580 if (CheckJNICalls) { 1581 // clear_pending_jni_exception_check 1582 __ load_const_optimized(R0, 0L); 1583 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread); 1584 } 1585 1586 __ reset_last_Java_frame(); 1587 1588 // Jvmdi/jvmpi support. Whether we've got an exception pending or 1589 // not, and whether unlocking throws an exception or not, we notify 1590 // on native method exit. If we do have an exception, we'll end up 1591 // in the caller's context to handle it, so if we don't do the 1592 // notify here, we'll drop it on the floor. 1593 __ notify_method_exit(true/*native method*/, 1594 ilgl /*illegal state (not used for native methods)*/, 1595 InterpreterMacroAssembler::NotifyJVMTI, 1596 false /*check_exceptions*/); 1597 1598 //============================================================================= 1599 // Handle exceptions 1600 1601 if (synchronized) { 1602 __ unlock_object(R26_monitor); // Can also unlock methods. 1603 } 1604 1605 // Reset active handles after returning from native. 1606 // thread->active_handles()->clear(); 1607 __ ld(active_handles, thread_(active_handles)); 1608 // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size"); 1609 __ li(R0, 0); 1610 __ stw(R0, in_bytes(JNIHandleBlock::top_offset()), active_handles); 1611 1612 Label exception_return_sync_check_already_unlocked; 1613 __ ld(R0/*pending_exception*/, thread_(pending_exception)); 1614 __ cmpdi(CR0, R0/*pending_exception*/, 0); 1615 __ bne(CR0, exception_return_sync_check_already_unlocked); 1616 1617 //----------------------------------------------------------------------------- 1618 // No exception pending. 1619 1620 // Move native method result back into proper registers and return. 1621 // Invoke result handler (may unbox/promote). 1622 __ ld(R11_scratch1, 0, R1_SP); 1623 __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1); 1624 __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1); 1625 __ call_stub(result_handler_addr); 1626 1627 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2); 1628 1629 // Must use the return pc which was loaded from the caller's frame 1630 // as the VM uses return-pc-patching for deoptimization. 1631 __ mtlr(R0); 1632 __ blr(); 1633 1634 //----------------------------------------------------------------------------- 1635 // An exception is pending. We call into the runtime only if the 1636 // caller was not interpreted. If it was interpreted the 1637 // interpreter will do the correct thing. If it isn't interpreted 1638 // (call stub/compiled code) we will change our return and continue. 1639 1640 BIND(exception_return_sync_check); 1641 1642 if (synchronized) { 1643 __ unlock_object(R26_monitor); // Can also unlock methods. 
1644 } 1645 BIND(exception_return_sync_check_already_unlocked); 1646 1647 const Register return_pc = R31; 1648 1649 __ ld(return_pc, 0, R1_SP); 1650 __ ld(return_pc, _abi0(lr), return_pc); 1651 1652 // Get the address of the exception handler. 1653 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 1654 R16_thread, 1655 return_pc /* return pc */); 1656 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2); 1657 1658 // Load the PC of the exception handler into LR. 1659 __ mtlr(R3_RET); 1660 1661 // Load exception into R3_ARG1 and clear pending exception in thread. 1662 __ ld(R3_ARG1/*exception*/, thread_(pending_exception)); 1663 __ li(R4_ARG2, 0); 1664 __ std(R4_ARG2, thread_(pending_exception)); 1665 1666 // Load the original return pc into R4_ARG2. 1667 __ mr(R4_ARG2/*issuing_pc*/, return_pc); 1668 1669 // Return to exception handler. 1670 __ blr(); 1671 1672 //============================================================================= 1673 // Counter overflow. 1674 1675 if (inc_counter) { 1676 // Handle invocation counter overflow. 1677 __ bind(invocation_counter_overflow); 1678 1679 generate_counter_overflow(continue_after_compile); 1680 } 1681 1682 return entry; 1683 } 1684 1685 // Generic interpreted method entry to (asm) interpreter. 1686 // 1687 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { 1688 bool inc_counter = UseCompiler || CountCompiledCalls; 1689 address entry = __ pc(); 1690 // Generate the code to allocate the interpreter stack frame. 1691 Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame. 1692 Rsize_of_locals = R5_ARG3; // Written by generate_fixed_frame. 1693 1694 // Does also a stack check to assure this frame fits on the stack. 1695 generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals); 1696 1697 // -------------------------------------------------------------------------- 1698 // Zero out non-parameter locals. 1699 // Note: *Always* zero out non-parameter locals as Sparc does. It's not 1700 // worth to ask the flag, just do it. 1701 Register Rslot_addr = R6_ARG4, 1702 Rnum = R7_ARG5; 1703 Label Lno_locals, Lzero_loop; 1704 1705 // Set up the zeroing loop. 1706 __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals); 1707 __ subf(Rslot_addr, Rsize_of_parameters, R18_locals); 1708 __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize); 1709 __ beq(CR0, Lno_locals); 1710 __ li(R0, 0); 1711 __ mtctr(Rnum); 1712 1713 // The zero locals loop. 1714 __ bind(Lzero_loop); 1715 __ std(R0, 0, Rslot_addr); 1716 __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize); 1717 __ bdnz(Lzero_loop); 1718 1719 __ bind(Lno_locals); 1720 1721 // -------------------------------------------------------------------------- 1722 // Counter increment and overflow check. 1723 Label invocation_counter_overflow; 1724 Label continue_after_compile; 1725 if (inc_counter || ProfileInterpreter) { 1726 1727 Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1; 1728 if (synchronized) { 1729 // Since at this point in the method invocation the exception handler 1730 // would try to exit the monitor of synchronized methods which hasn't 1731 // been entered yet, we set the thread local variable 1732 // _do_not_unlock_if_synchronized to true. If any exception was thrown by 1733 // runtime, exception handling i.e. unlock_if_synchronized_method will 1734 // check this thread local flag. 
1735 // This flag has two effects, one is to force an unwind in the topmost 1736 // interpreter frame and not perform an unlock while doing so. 1737 __ li(R0, 1); 1738 __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread); 1739 } 1740 1741 // Argument and return type profiling. 1742 __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4); 1743 1744 // Increment invocation counter and check for overflow. 1745 if (inc_counter) { 1746 generate_counter_incr(&invocation_counter_overflow); 1747 } 1748 1749 __ bind(continue_after_compile); 1750 } 1751 1752 bang_stack_shadow_pages(false); 1753 1754 if (inc_counter || ProfileInterpreter) { 1755 // Reset the _do_not_unlock_if_synchronized flag. 1756 if (synchronized) { 1757 __ li(R0, 0); 1758 __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread); 1759 } 1760 } 1761 1762 // -------------------------------------------------------------------------- 1763 // Locking of synchronized methods. Must happen AFTER invocation_counter 1764 // check and stack overflow check, so method is not locked if overflows. 1765 if (synchronized) { 1766 lock_method(R3_ARG1, R4_ARG2, R5_ARG3); 1767 } 1768 #ifdef ASSERT 1769 else { 1770 Label Lok; 1771 __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method); 1772 __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED); 1773 __ asm_assert_eq("method needs synchronization"); 1774 __ bind(Lok); 1775 } 1776 #endif // ASSERT 1777 1778 // -------------------------------------------------------------------------- 1779 // JVMTI support 1780 __ notify_method_entry(); 1781 1782 // -------------------------------------------------------------------------- 1783 // Start executing instructions. 1784 __ dispatch_next(vtos); 1785 1786 // -------------------------------------------------------------------------- 1787 if (inc_counter) { 1788 // Handle invocation counter overflow. 1789 __ bind(invocation_counter_overflow); 1790 generate_counter_overflow(continue_after_compile); 1791 } 1792 return entry; 1793 } 1794 1795 // CRC32 Intrinsics. 1796 // 1797 // Contract on scratch and work registers. 1798 // ======================================= 1799 // 1800 // On ppc, the register set {R2..R12} is available in the interpreter as scratch/work registers. 1801 // You should, however, keep in mind that {R3_ARG1..R10_ARG8} is the C-ABI argument register set. 1802 // You can't rely on these registers across calls. 1803 // 1804 // The generators for CRC32_update and for CRC32_updateBytes use the 1805 // scratch/work register set internally, passing the work registers 1806 // as arguments to the MacroAssembler emitters as required. 1807 // 1808 // R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments. 1809 // Their contents is not constant but may change according to the requirements 1810 // of the emitted code. 1811 // 1812 // All other registers from the scratch/work register set are used "internally" 1813 // and contain garbage (i.e. unpredictable values) once blr() is reached. 1814 // Basically, only R3_RET contains a defined value which is the function result. 1815 // 1816 /** 1817 * Method entry for static native methods: 1818 * int java.util.zip.CRC32.update(int crc, int b) 1819 */ 1820 address TemplateInterpreterGenerator::generate_CRC32_update_entry() { 1821 assert(UseCRC32Intrinsics, "this intrinsic is not supported"); 1822 address start = __ pc(); // Remember stub start address (is rtn value). 
1823   Label slow_path;
1824
1825   // Safepoint check
1826   const Register sync_state = R11_scratch1;
1827   __ safepoint_poll(slow_path, sync_state, false /* at_return */, false /* in_nmethod */);
1828
1829   // We don't generate a local frame and don't align the stack because
1830   // we don't even call stub code (we generate the code inline)
1831   // and there is no safepoint on this path.
1832
1833   // Load java parameters.
1834   // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
1835   const Register argP    = R15_esp;
1836   const Register crc     = R3_ARG1;  // crc value
1837   const Register data    = R4_ARG2;
1838   const Register table   = R5_ARG3;  // address of crc32 table
1839
1840   BLOCK_COMMENT("CRC32_update {");
1841
1842   // Arguments are reversed on java expression stack
1843 #ifdef VM_LITTLE_ENDIAN
1844   int data_offs = 0+1*wordSize;      // (stack) address of byte value. Emitter expects address, not value.
1845                                      // Being passed as an int, the single byte is at offset +0.
1846 #else
1847   int data_offs = 3+1*wordSize;      // (stack) address of byte value. Emitter expects address, not value.
1848                                      // Being passed from java as an int, the single byte is at offset +3.
1849 #endif
1850   __ lwz(crc, 2*wordSize, argP);     // Current crc state, zero extend to 64 bit to have a clean register.
1851   __ lbz(data, data_offs, argP);     // Byte from buffer, zero-extended.
1852   __ load_const_optimized(table, StubRoutines::crc_table_addr(), R0);
1853   __ kernel_crc32_singleByteReg(crc, data, table, true);
1854
1855   // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
1856   __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
1857   __ blr();
1858
1859   // Generate a vanilla native entry as the slow path.
1860   BLOCK_COMMENT("} CRC32_update");
1861   BIND(slow_path);
1862   __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
1863   return start;
1864 }
1865
1866 /**
1867  * Method entry for static native methods:
1868  *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
1869  *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
1870  */
1871 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
1872   assert(UseCRC32Intrinsics, "this intrinsic is not supported");
1873   address start = __ pc();  // Remember stub start address (is rtn value).
1874   Label slow_path;
1875
1876   // Safepoint check
1877   const Register sync_state = R11_scratch1;
1878   __ safepoint_poll(slow_path, sync_state, false /* at_return */, false /* in_nmethod */);
1879
1880   // We don't generate a local frame and don't align the stack because
1881   // we don't even call stub code (we generate the code inline)
1882   // and there is no safepoint on this path.
1883
1884   // Load parameters.
1885   // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
1886   const Register argP    = R15_esp;
1887   const Register crc     = R3_ARG1;  // crc value
1888   const Register data    = R4_ARG2;  // address of java byte array
1889   const Register dataLen = R5_ARG3;  // source data len
1890   const Register tmp     = R11_scratch1;
1891
1892   // Arguments are reversed on java expression stack.
1893   // Calculate address of start element.
1894   if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer direct".
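    // Note: for updateByteBuffer the incoming 'buf' is already a raw native address
    // (a long), so only the offset is added below; the updateBytes path addresses a
    // Java byte[] and additionally has to skip the array header via
    // arrayOopDesc::base_offset_in_bytes(T_BYTE).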
1895     BLOCK_COMMENT("CRC32_updateByteBuffer {");
1896     // crc     @ (SP + 5W) (32bit)
1897     // buf     @ (SP + 3W) (64bit ptr to long array)
1898     // off     @ (SP + 2W) (32bit)
1899     // dataLen @ (SP + 1W) (32bit)
1900     // data = buf + off
1901     __ ld(  data,    3*wordSize, argP);  // start of byte buffer
1902     __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
1903     __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
1904     __ lwz( crc,     5*wordSize, argP);  // current crc state
1905     __ add( data, data, tmp);            // Add byte buffer offset.
1906   } else {  // Used for "updateBytes update".
1907     BLOCK_COMMENT("CRC32_updateBytes {");
1908     // crc     @ (SP + 4W) (32bit)
1909     // buf     @ (SP + 3W) (64bit ptr to byte array)
1910     // off     @ (SP + 2W) (32bit)
1911     // dataLen @ (SP + 1W) (32bit)
1912     // data = buf + off + base_offset
1913     __ ld(  data,    3*wordSize, argP);  // start of byte buffer
1914     __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
1915     __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
1916     __ add( data, data, tmp);            // add byte buffer offset
1917     __ lwz( crc,     4*wordSize, argP);  // current crc state
1918     __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
1919   }
1920
1921   __ crc32(crc, data, dataLen, R2, R6, R7, R8, R9, R10, R11, R12, false);
1922
1923   // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
1924   __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
1925   __ blr();
1926
1927   // Generate a vanilla native entry as the slow path.
1928   BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
1929   BIND(slow_path);
1930   __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
1931   return start;
1932 }
1933
1934
1935 /**
1936  * Method entry for intrinsic-candidate (non-native) methods:
1937  *   int java.util.zip.CRC32C.updateBytes(           int crc, byte[] b,  int off, int end)
1938  *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
1939  * Unlike CRC32, CRC32C does not have any methods marked as native.
1940  * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
1941  **/
1942 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
1943   assert(UseCRC32CIntrinsics, "this intrinsic is not supported");
1944   address start = __ pc();  // Remember stub start address (is rtn value).
1945
1946   // We don't generate a local frame and don't align the stack because
1947   // we don't even call stub code (we generate the code inline)
1948   // and there is no safepoint on this path.
1949
1950   // Load parameters.
1951   // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
1952   const Register argP    = R15_esp;
1953   const Register crc     = R3_ARG1;  // crc value
1954   const Register data    = R4_ARG2;  // address of java byte array
1955   const Register dataLen = R5_ARG3;  // source data len
1956   const Register tmp     = R11_scratch1;
1957
1958   // Arguments are reversed on java expression stack.
1959   // Calculate address of start element.
1960   if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateDirectByteBuffer".
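    // Note: as stated above, CRC32C passes an end index instead of a length, so in
    // both branches below the number of bytes to process is computed as
    // dataLen = end - off.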
1961 BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {"); 1962 // crc @ (SP + 5W) (32bit) 1963 // buf @ (SP + 3W) (64bit ptr to long array) 1964 // off @ (SP + 2W) (32bit) 1965 // dataLen @ (SP + 1W) (32bit) 1966 // data = buf + off 1967 __ ld( data, 3*wordSize, argP); // start of byte buffer 1968 __ lwa( tmp, 2*wordSize, argP); // byte buffer offset 1969 __ lwa( dataLen, 1*wordSize, argP); // #bytes to process 1970 __ lwz( crc, 5*wordSize, argP); // current crc state 1971 __ add( data, data, tmp); // Add byte buffer offset. 1972 __ sub( dataLen, dataLen, tmp); // (end_index - offset) 1973 } else { // Used for "updateBytes update". 1974 BLOCK_COMMENT("CRC32C_updateBytes {"); 1975 // crc @ (SP + 4W) (32bit) 1976 // buf @ (SP + 3W) (64bit ptr to byte array) 1977 // off @ (SP + 2W) (32bit) 1978 // dataLen @ (SP + 1W) (32bit) 1979 // data = buf + off + base_offset 1980 __ ld( data, 3*wordSize, argP); // start of byte buffer 1981 __ lwa( tmp, 2*wordSize, argP); // byte buffer offset 1982 __ lwa( dataLen, 1*wordSize, argP); // #bytes to process 1983 __ add( data, data, tmp); // add byte buffer offset 1984 __ sub( dataLen, dataLen, tmp); // (end_index - offset) 1985 __ lwz( crc, 4*wordSize, argP); // current crc state 1986 __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE)); 1987 } 1988 1989 __ crc32(crc, data, dataLen, R2, R6, R7, R8, R9, R10, R11, R12, true); 1990 1991 // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted). 1992 __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0); 1993 __ blr(); 1994 1995 BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}"); 1996 return start; 1997 } 1998 1999 // Not supported 2000 address TemplateInterpreterGenerator::generate_currentThread() { return nullptr; } 2001 address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; } 2002 address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; } 2003 address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; } 2004 address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; } 2005 2006 // ============================================================================= 2007 // Exceptions 2008 2009 void TemplateInterpreterGenerator::generate_throw_exception() { 2010 Register Rexception = R17_tos, 2011 Rcontinuation = R3_RET; 2012 2013 // -------------------------------------------------------------------------- 2014 // Entry point if an method returns with a pending exception (rethrow). 2015 Interpreter::_rethrow_exception_entry = __ pc(); 2016 { 2017 __ restore_interpreter_state(R11_scratch1, false /*bcp_and_mdx_only*/, true /*restore_top_frame_sp*/); 2018 2019 // Compiled code destroys templateTableBase, reload. 2020 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1); 2021 } 2022 2023 // Entry point if a interpreted method throws an exception (throw). 2024 Interpreter::_throw_exception_entry = __ pc(); 2025 { 2026 __ mr(Rexception, R3_RET); 2027 2028 __ verify_oop(Rexception); 2029 2030 // Expression stack must be empty before entering the VM in case of an exception. 2031 __ empty_expression_stack(); 2032 // Find exception handler address and preserve exception oop. 2033 // Call C routine to find handler and jump to it. 
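    // (Sketch of the contract, derived from the code below: the VM call returns the
    //  continuation address in R3_RET/Rcontinuation and the, possibly substituted,
    //  exception oop in Rexception; we then enter the handler via CTR with the
    //  exception pushed on the expression stack.)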
2034 __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception); 2035 __ mtctr(Rcontinuation); 2036 // Push exception for exception handler bytecodes. 2037 __ push_ptr(Rexception); 2038 2039 // Jump to exception handler (may be remove activation entry!). 2040 __ bctr(); 2041 } 2042 2043 // If the exception is not handled in the current frame the frame is 2044 // removed and the exception is rethrown (i.e. exception 2045 // continuation is _rethrow_exception). 2046 // 2047 // Note: At this point the bci is still the bxi for the instruction 2048 // which caused the exception and the expression stack is 2049 // empty. Thus, for any VM calls at this point, GC will find a legal 2050 // oop map (with empty expression stack). 2051 2052 // In current activation 2053 // tos: exception 2054 // bcp: exception bcp 2055 2056 // -------------------------------------------------------------------------- 2057 // JVMTI PopFrame support 2058 2059 Interpreter::_remove_activation_preserving_args_entry = __ pc(); 2060 { 2061 // Set the popframe_processing bit in popframe_condition indicating that we are 2062 // currently handling popframe, so that call_VMs that may happen later do not 2063 // trigger new popframe handling cycles. 2064 __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread); 2065 __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit); 2066 __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread); 2067 2068 // Empty the expression stack, as in normal exception handling. 2069 __ empty_expression_stack(); 2070 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false); 2071 2072 // Check to see whether we are returning to a deoptimized frame. 2073 // (The PopFrame call ensures that the caller of the popped frame is 2074 // either interpreted or compiled and deoptimizes it if compiled.) 2075 // Note that we don't compare the return PC against the 2076 // deoptimization blob's unpack entry because of the presence of 2077 // adapter frames in C2. 2078 Label Lcaller_not_deoptimized; 2079 Register return_pc = R3_ARG1; 2080 __ ld(return_pc, 0, R1_SP); 2081 __ ld(return_pc, _abi0(lr), return_pc); 2082 __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc); 2083 __ cmpdi(CR0, R3_RET, 0); 2084 __ bne(CR0, Lcaller_not_deoptimized); 2085 2086 // The deoptimized case. 2087 // In this case, we can't call dispatch_next() after the frame is 2088 // popped, but instead must save the incoming arguments and restore 2089 // them after deoptimization has occurred. 2090 __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method); 2091 __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2); 2092 __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize); 2093 __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize); 2094 __ subf(R5_ARG3, R4_ARG2, R5_ARG3); 2095 // Save these arguments. 2096 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3); 2097 2098 // Inform deoptimization that it is responsible for restoring these arguments. 2099 __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit); 2100 __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread); 2101 2102 // Return from the current method into the deoptimization blob. 
Will eventually 2103 // end up in the deopt interpreter entry, deoptimization prepared everything that 2104 // we will reexecute the call that called us. 2105 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2); 2106 __ mtlr(return_pc); 2107 __ pop_cont_fastpath(); 2108 __ blr(); 2109 2110 // The non-deoptimized case. 2111 __ bind(Lcaller_not_deoptimized); 2112 2113 // Clear the popframe condition flag. 2114 __ li(R0, 0); 2115 __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread); 2116 2117 // Get out of the current method and re-execute the call that called us. 2118 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2); 2119 __ pop_cont_fastpath(); 2120 __ restore_interpreter_state(R11_scratch1, false /*bcp_and_mdx_only*/, true /*restore_top_frame_sp*/); 2121 if (ProfileInterpreter) { 2122 __ set_method_data_pointer_for_bcp(); 2123 __ ld(R11_scratch1, 0, R1_SP); 2124 __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1); 2125 } 2126 #if INCLUDE_JVMTI 2127 Label L_done; 2128 2129 __ lbz(R11_scratch1, 0, R14_bcp); 2130 __ cmpwi(CR0, R11_scratch1, Bytecodes::_invokestatic); 2131 __ bne(CR0, L_done); 2132 2133 // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. 2134 // Detect such a case in the InterpreterRuntime function and return the member name argument, or null. 2135 __ ld(R4_ARG2, 0, R18_locals); 2136 __ call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp); 2137 2138 __ cmpdi(CR0, R4_ARG2, 0); 2139 __ beq(CR0, L_done); 2140 __ std(R4_ARG2, wordSize, R15_esp); 2141 __ bind(L_done); 2142 #endif // INCLUDE_JVMTI 2143 __ dispatch_next(vtos); 2144 } 2145 // end of JVMTI PopFrame support 2146 2147 // -------------------------------------------------------------------------- 2148 // Remove activation exception entry. 2149 // This is jumped to if an interpreted method can't handle an exception itself 2150 // (we come from the throw/rethrow exception entry above). We're going to call 2151 // into the VM to find the exception handler in the caller, pop the current 2152 // frame and return the handler we calculated. 2153 Interpreter::_remove_activation_entry = __ pc(); 2154 { 2155 __ pop_ptr(Rexception); 2156 __ verify_oop(Rexception); 2157 __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread); 2158 2159 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true); 2160 __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false); 2161 2162 __ get_vm_result(Rexception); 2163 2164 // We are done with this activation frame; find out where to go next. 2165 // The continuation point will be an exception handler, which expects 2166 // the following registers set up: 2167 // 2168 // RET: exception oop 2169 // ARG2: Issuing PC (see generate_exception_blob()), only used if the caller is compiled. 2170 2171 Register return_pc = R31; // Needs to survive the runtime call. 2172 __ ld(return_pc, 0, R1_SP); 2173 __ ld(return_pc, _abi0(lr), return_pc); 2174 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc); 2175 2176 // Remove the current activation. 
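    // (The merge_frames call below pops the interpreter frame by restoring the
    //  caller's SP from R21_sender_SP; the return pc is not reloaded here because we
    //  leave through the exception handler whose address is moved from R3_RET to LR
    //  below.)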
2177 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2); 2178 __ pop_cont_fastpath(); 2179 2180 __ mr(R4_ARG2, return_pc); 2181 __ mtlr(R3_RET); 2182 __ mr(R3_RET, Rexception); 2183 __ blr(); 2184 } 2185 } 2186 2187 // JVMTI ForceEarlyReturn support. 2188 // Returns "in the middle" of a method with a "fake" return value. 2189 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { 2190 2191 Register Rscratch1 = R11_scratch1, 2192 Rscratch2 = R12_scratch2; 2193 2194 address entry = __ pc(); 2195 __ empty_expression_stack(); 2196 2197 __ load_earlyret_value(state, Rscratch1); 2198 2199 __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread); 2200 // Clear the earlyret state. 2201 __ li(R0, 0); 2202 __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1); 2203 2204 __ remove_activation(state, false, false); 2205 // Copied from TemplateTable::_return. 2206 // Restoration of lr done by remove_activation. 2207 switch (state) { 2208 // Narrow result if state is itos but result type is smaller. 2209 case btos: 2210 case ztos: 2211 case ctos: 2212 case stos: 2213 case itos: __ narrow(R17_tos); /* fall through */ 2214 case ltos: 2215 case atos: __ mr(R3_RET, R17_tos); break; 2216 case ftos: 2217 case dtos: __ fmr(F1_RET, F15_ftos); break; 2218 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need 2219 // to get visible before the reference to the object gets stored anywhere. 2220 __ membar(Assembler::StoreStore); break; 2221 default : ShouldNotReachHere(); 2222 } 2223 __ blr(); 2224 2225 return entry; 2226 } // end of ForceEarlyReturn support 2227 2228 //----------------------------------------------------------------------------- 2229 // Helper for vtos entry point generation 2230 2231 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, 2232 address& bep, 2233 address& cep, 2234 address& sep, 2235 address& aep, 2236 address& iep, 2237 address& lep, 2238 address& fep, 2239 address& dep, 2240 address& vep) { 2241 assert(t->is_valid() && t->tos_in() == vtos, "illegal template"); 2242 Label L; 2243 2244 aep = __ pc(); __ push_ptr(); __ b(L); 2245 fep = __ pc(); __ push_f(); __ b(L); 2246 dep = __ pc(); __ push_d(); __ b(L); 2247 lep = __ pc(); __ push_l(); __ b(L); 2248 __ align(32, 12, 24); // align L 2249 bep = cep = sep = 2250 iep = __ pc(); __ push_i(); 2251 vep = __ pc(); 2252 __ bind(L); 2253 generate_and_dispatch(t); 2254 } 2255 2256 //----------------------------------------------------------------------------- 2257 2258 // Non-product code 2259 #ifndef PRODUCT 2260 address TemplateInterpreterGenerator::generate_trace_code(TosState state) { 2261 //__ flush_bundle(); 2262 address entry = __ pc(); 2263 2264 const char *bname = nullptr; 2265 uint tsize = 0; 2266 switch(state) { 2267 case ftos: 2268 bname = "trace_code_ftos {"; 2269 tsize = 2; 2270 break; 2271 case btos: 2272 bname = "trace_code_btos {"; 2273 tsize = 2; 2274 break; 2275 case ztos: 2276 bname = "trace_code_ztos {"; 2277 tsize = 2; 2278 break; 2279 case ctos: 2280 bname = "trace_code_ctos {"; 2281 tsize = 2; 2282 break; 2283 case stos: 2284 bname = "trace_code_stos {"; 2285 tsize = 2; 2286 break; 2287 case itos: 2288 bname = "trace_code_itos {"; 2289 tsize = 2; 2290 break; 2291 case ltos: 2292 bname = "trace_code_ltos {"; 2293 tsize = 3; 2294 break; 2295 case atos: 2296 bname = "trace_code_atos {"; 2297 tsize = 2; 2298 break; 2299 case vtos: 2300 // Note: In case 
of vtos, the topmost of stack value could be a int or doubl 2301 // In case of a double (2 slots) we won't see the 2nd stack value. 2302 // Maybe we simply should print the topmost 3 stack slots to cope with the problem. 2303 bname = "trace_code_vtos {"; 2304 tsize = 2; 2305 2306 break; 2307 case dtos: 2308 bname = "trace_code_dtos {"; 2309 tsize = 3; 2310 break; 2311 default: 2312 ShouldNotReachHere(); 2313 } 2314 BLOCK_COMMENT(bname); 2315 2316 // Support short-cut for TraceBytecodesAt. 2317 // Don't call into the VM if we don't want to trace to speed up things. 2318 Label Lskip_vm_call; 2319 if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) { 2320 int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true); 2321 int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true); 2322 __ ld(R11_scratch1, offs1, R11_scratch1); 2323 __ lwa(R12_scratch2, offs2, R12_scratch2); 2324 __ cmpd(CR0, R12_scratch2, R11_scratch1); 2325 __ blt(CR0, Lskip_vm_call); 2326 } 2327 2328 __ push(state); 2329 // Load 2 topmost expression stack values. 2330 __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp); 2331 __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp); 2332 __ mflr(R31); 2333 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false); 2334 __ mtlr(R31); 2335 __ pop(state); 2336 2337 if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) { 2338 __ bind(Lskip_vm_call); 2339 } 2340 __ blr(); 2341 BLOCK_COMMENT("} trace_code"); 2342 return entry; 2343 } 2344 2345 void TemplateInterpreterGenerator::count_bytecode() { 2346 int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true); 2347 __ lwz(R12_scratch2, offs, R11_scratch1); 2348 __ addi(R12_scratch2, R12_scratch2, 1); 2349 __ stw(R12_scratch2, offs, R11_scratch1); 2350 } 2351 2352 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { 2353 int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true); 2354 __ lwz(R12_scratch2, offs, R11_scratch1); 2355 __ addi(R12_scratch2, R12_scratch2, 1); 2356 __ stw(R12_scratch2, offs, R11_scratch1); 2357 } 2358 2359 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { 2360 const Register addr = R11_scratch1, 2361 tmp = R12_scratch2; 2362 // Get index, shift out old bytecode, bring in new bytecode, and store it. 2363 // _index = (_index >> log2_number_of_codes) | 2364 // (bytecode << log2_number_of_codes); 2365 int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true); 2366 __ lwz(tmp, offs1, addr); 2367 __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes); 2368 __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes); 2369 __ stw(tmp, offs1, addr); 2370 2371 // Bump bucket contents. 2372 // _counters[_index] ++; 2373 int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true); 2374 __ sldi(tmp, tmp, LogBytesPerInt); 2375 __ add(addr, tmp, addr); 2376 __ lwz(tmp, offs2, addr); 2377 __ addi(tmp, tmp, 1); 2378 __ stw(tmp, offs2, addr); 2379 } 2380 2381 void TemplateInterpreterGenerator::trace_bytecode(Template* t) { 2382 // Call a little run-time stub to avoid blow-up for each bytecode. 
2383   // The run-time stub saves the right registers, depending on
2384   // the tosca in-state for the given template.
2385
2386   assert(Interpreter::trace_code(t->tos_in()) != nullptr,
2387          "entry must have been generated");
2388
2389   // Note: we destroy LR here.
2390   __ bl(Interpreter::trace_code(t->tos_in()));
2391 }
2392
2393 void TemplateInterpreterGenerator::stop_interpreter_at() {
2394   Label L;
2395   int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
2396   int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
2397   __ ld(R11_scratch1, offs1, R11_scratch1);
2398   __ lwa(R12_scratch2, offs2, R12_scratch2);
2399   __ cmpd(CR0, R12_scratch2, R11_scratch1);
2400   __ bne(CR0, L);
2401   __ illtrap();
2402   __ bind(L);
2403 }
2404
2405 #endif // !PRODUCT