/*
 * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2024 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 256*K;

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  // Slow_signature handler that respects the PPC C calling conventions.
  //
  // We get called by the native entry code with our output register
  // area == 8. First we call InterpreterRuntime::get_result_handler
  // to copy the pointer to the signature string temporarily to the
  // first C-argument and to return the result_handler in
  // R3_RET. Since native_entry will copy the jni-pointer to the
  // first C-argument slot later on, it is OK to occupy this slot
  // temporarily. Then we copy the argument list on the java
  // expression stack into native varargs format on the native stack
  // and load arguments into argument registers. Integer arguments in
  // the varargs vector will be sign-extended to 8 bytes.
  //
  // On entry:
  //   R3_ARG1        - intptr_t*            Address of java argument list in memory.
  //   R15_prev_state - BytecodeInterpreter* Address of interpreter state for
  //                                         this method
  //   R19_method
  //
  // On exit (just before return instruction):
  //   R3_RET            - contains the address of the result_handler.
  //   R4_ARG2           - is not updated for static methods and contains "this" otherwise.
  //   R5_ARG3-R10_ARG8  - When the (i-2)th Java argument is not of type float or double,
  //                       ARGi contains this argument. Otherwise, ARGi is not updated.
  //   F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.

  const int LogSizeOfTwoInstructions = 3;

  // FIXME: use Argument:: GL: Argument names different numbers!
  const int max_fp_register_arguments  = 13;
  const int max_int_register_arguments = 6;  // first 2 are reserved

  const Register arg_java       = R21_tmp1;
  const Register arg_c          = R22_tmp2;
  const Register signature      = R23_tmp3;  // is string
  const Register sig_byte       = R24_tmp4;
  const Register fpcnt          = R25_tmp5;
  const Register argcnt         = R26_tmp6;
  const Register intSlot        = R27_tmp7;
  const Register target_sp      = R28_tmp8;
  const FloatRegister floatSlot = F0;

  address entry = __ function_entry();

  __ save_LR(R0);
  __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
  // We use target_sp for storing arguments in the C frame.
  __ mr(target_sp, R1_SP);
  __ push_frame_reg_args_nonvolatiles(0, R11_scratch1);

  __ mr(arg_java, R3_ARG1);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);

  // Signature is in R3_RET. Signature is callee saved.
  __ mr(signature, R3_RET);

  // Get the result handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);

  {
    Label L;
    // test if static
    // _access_flags._flags must be at offset 0.
    // TODO PPC port: requires change in shared code.
    //assert(in_bytes(AccessFlags::flags_offset()) == 0,
    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
    // _access_flags must be a 32 bit value.
    assert(sizeof(AccessFlags) == 4, "wrong size");
    __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
    // testbit with condition register.
    __ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
    __ btrue(CCR0, L);
    // For non-static functions, pass "this" in R4_ARG2 and copy it
    // to 2nd C-arg slot.
    // We need to box the Java object here, so we use arg_java
    // (address of current Java stack slot) as argument and don't
    // dereference it as in case of ints, floats, etc.
    __ mr(R4_ARG2, arg_java);
    __ addi(arg_java, arg_java, -BytesPerWord);
    __ std(R4_ARG2, _abi0(carg_2), target_sp);
    __ bind(L);
  }

  // Will be incremented directly after loop_start. argcnt=0
  // corresponds to 3rd C argument.
  __ li(argcnt, -1);
  // arg_c points to 3rd C argument
  __ addi(arg_c, target_sp, _abi0(carg_3));
  // no floating-point args parsed so far
  __ li(fpcnt, 0);

  Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
  Label loop_start, loop_end;
  Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;

  // signature points to '(' at entry
#ifdef ASSERT
  __ lbz(sig_byte, 0, signature);
  __ cmplwi(CCR0, sig_byte, '(');
  __ bne(CCR0, do_dontreachhere);
#endif

  __ bind(loop_start);

  __ addi(argcnt, argcnt, 1);
  __ lbzu(sig_byte, 1, signature);

  __ cmplwi(CCR0, sig_byte, ')'); // end of signature
  __ beq(CCR0, loop_end);

  __ cmplwi(CCR0, sig_byte, 'B'); // byte
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'C'); // char
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'D'); // double
  __ beq(CCR0, do_double);

  __ cmplwi(CCR0, sig_byte, 'F'); // float
  __ beq(CCR0, do_float);

  __ cmplwi(CCR0, sig_byte, 'I'); // int
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'J'); // long
  __ beq(CCR0, do_long);

  __ cmplwi(CCR0, sig_byte, 'S'); // short
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'Z'); // boolean
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'L'); // object
  __ beq(CCR0, do_object);

  __ cmplwi(CCR0, sig_byte, '['); // array
  __ beq(CCR0, do_array);

  //  __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
  //  __ beq(CCR0, do_void);

  __ bind(do_dontreachhere);

  __ unimplemented("ShouldNotReachHere in slow_signature_handler");

  __ bind(do_array);

  {
    Label start_skip, end_skip;

    __ bind(start_skip);
    __ lbzu(sig_byte, 1, signature);
    __ cmplwi(CCR0, sig_byte, '[');
    __ beq(CCR0, start_skip); // skip further brackets
    __ cmplwi(CCR0, sig_byte, '9');
    __ bgt(CCR0, end_skip);   // no optional size
    __ cmplwi(CCR0, sig_byte, '0');
    __ bge(CCR0, start_skip); // skip optional size
    __ bind(end_skip);

    __ cmplwi(CCR0, sig_byte, 'L');
    __ beq(CCR0, do_object);  // for arrays of objects, the name of the object must be skipped
    __ b(do_boxed);           // otherwise, go directly to do_boxed
  }

  __ bind(do_object);
  {
    Label L;
    __ bind(L);
    __ lbzu(sig_byte, 1, signature);
    __ cmplwi(CCR0, sig_byte, ';');
    __ bne(CCR0, L);
  }
  // Need to box the Java object here, so we use arg_java (address of
  // current Java stack slot) as argument and don't dereference it as
  // in case of ints, floats, etc.
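  // (Illustrative note: "boxing" here just means passing the address of the
  //  Java stack slot as the handle, or a null handle when the slot holds null,
  //  which is what the do_null path below produces.)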
  Label do_null;
  __ bind(do_boxed);
  __ ld(R0, 0, arg_java);
  __ cmpdi(CCR0, R0, 0);
  __ li(intSlot, 0);
  __ beq(CCR0, do_null);
  __ mr(intSlot, arg_java);
  __ bind(do_null);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
  __ blt(CCR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_int);
  __ lwa(intSlot, 0, arg_java);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
  __ blt(CCR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_long);
  __ ld(intSlot, -BytesPerWord, arg_java);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
  __ blt(CCR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_float);
  __ lfs(floatSlot, 0, arg_java);
  __ stfs(floatSlot, Argument::float_on_stack_offset_in_bytes_c, arg_c);
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
  __ blt(CCR0, move_floatSlot_to_FARG);
  __ b(loop_start);

  __ bind(do_double);
  __ lfd(floatSlot, - BytesPerWord, arg_java);
  __ stfd(floatSlot, 0, arg_c);
  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
  __ blt(CCR0, move_floatSlot_to_FARG);
  __ b(loop_start);

  __ bind(loop_end);

  __ pop_frame();
  __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
  __ restore_LR(R0);

  __ blr();

  Label move_int_arg, move_float_arg;
  __ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
  __ mr(R5_ARG3, intSlot);  __ b(loop_start);
  __ mr(R6_ARG4, intSlot);  __ b(loop_start);
  __ mr(R7_ARG5, intSlot);  __ b(loop_start);
  __ mr(R8_ARG6, intSlot);  __ b(loop_start);
  __ mr(R9_ARG7, intSlot);  __ b(loop_start);
  __ mr(R10_ARG8, intSlot); __ b(loop_start);

  __ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
  __ fmr(F1_ARG1, floatSlot);   __ b(loop_start);
  __ fmr(F2_ARG2, floatSlot);   __ b(loop_start);
  __ fmr(F3_ARG3, floatSlot);   __ b(loop_start);
  __ fmr(F4_ARG4, floatSlot);   __ b(loop_start);
  __ fmr(F5_ARG5, floatSlot);   __ b(loop_start);
  __ fmr(F6_ARG6, floatSlot);   __ b(loop_start);
  __ fmr(F7_ARG7, floatSlot);   __ b(loop_start);
  __ fmr(F8_ARG8, floatSlot);   __ b(loop_start);
  __ fmr(F9_ARG9, floatSlot);   __ b(loop_start);
  __ fmr(F10_ARG10, floatSlot); __ b(loop_start);
  __ fmr(F11_ARG11, floatSlot); __ b(loop_start);
  __ fmr(F12_ARG12, floatSlot); __ b(loop_start);
  __ fmr(F13_ARG13, floatSlot); __ b(loop_start);

  __ bind(move_intSlot_to_ARG);
  __ sldi(R0, argcnt, LogSizeOfTwoInstructions);
  __ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
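  // Illustrative example: with LogSizeOfTwoInstructions == 3 each case in the
  // move_int_arg table spans 8 bytes (two 4-byte instructions), so argcnt == 2
  // yields an offset of 2 << 3 == 16 and the computed branch below lands on
  // the R7_ARG5 case.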
  __ add(R11_scratch1, R0, R11_scratch1);
  __ mtctr(R11_scratch1/*branch_target*/);
  __ bctr();

  __ bind(move_floatSlot_to_FARG);
  __ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
  __ addi(fpcnt, fpcnt, 1);
  __ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
  __ add(R11_scratch1, R0, R11_scratch1);
  __ mtctr(R11_scratch1/*branch_target*/);
  __ bctr();

  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  //
  // Registers alive
  //   R3_RET
  //   LR
  //
  // Registers updated
  //   R3_RET
  //

  Label done;
  address entry = __ pc();

  switch (type) {
  case T_BOOLEAN:
    // convert !=0 to 1
    __ normalize_bool(R3_RET);
    break;
  case T_BYTE:
    // sign extend 8 bits
    __ extsb(R3_RET, R3_RET);
    break;
  case T_CHAR:
    // zero extend 16 bits
    __ clrldi(R3_RET, R3_RET, 48);
    break;
  case T_SHORT:
    // sign extend 16 bits
    __ extsh(R3_RET, R3_RET);
    break;
  case T_INT:
    // sign extend 32 bits
    __ extsw(R3_RET, R3_RET);
    break;
  case T_LONG:
    break;
  case T_OBJECT:
    // JNIHandles::resolve result.
    __ resolve_jobject(R3_RET, R11_scratch1, R31, MacroAssembler::PRESERVATION_FRAME_LR); // kills R31
    break;
  case T_FLOAT:
    break;
  case T_DOUBLE:
    break;
  case T_VOID:
    break;
  default: ShouldNotReachHere();
  }

  BIND(done);
  __ blr();

  return entry;
}

// Abstract method entry.
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();

  //
  // Registers alive
  //   R16_thread - JavaThread*
  //   R19_method - callee's method (method to be invoked)
  //   R1_SP      - SP prepared such that caller's outgoing args are near top
  //   LR         - return address to caller
  //
  // Stack layout at this point:
  //
  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
  //           alignment (optional)
  //           [outgoing Java arguments]
  //           ...
  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
  //           ...
  //

  // Can't use call_VM here because we have not set up a new
  // interpreter state. Make the call to the vm and make it look like
  // our caller set up the JavaFrameAnchor.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Push a new C frame and save LR.
  __ save_LR(R0);
  __ push_frame_reg_args(0, R11_scratch1);

  // This is not a leaf but we have a JavaFrameAnchor now and we will
  // check (create) exceptions afterward so this is ok.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
                  R16_thread, R19_method);

  // Pop the C frame and restore LR.
  __ pop_frame();
  __ restore_LR(R0);

  // Reset JavaFrameAnchor from call_VM_leaf above.
  __ reset_last_Java_frame();

  // We don't know our caller, so jump to the general forward exception stub,
  // which will also pop our full frame off. Satisfy the interface of
  // SharedRuntime::generate_forward_exception()
  __ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}

// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
//    into R8 and return quickly
// 2. If G1 is active we *must* execute this intrinsic for correctness:
//    It contains a GC barrier which puts the reference into the satb buffer
//    to indicate that someone holds a strong reference to the object the
//    weak ref points to!
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. the "intrinsified" code for G1 (or any SATB based GC),
  //    2. the slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  Label slow_path;

  // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);

  // In the G1 code we don't check if we need to reach a safepoint. We
  // continue and the thread will safepoint at the next bytecode dispatch.

  // If the receiver is null then it is OK to jump to the slow path.
  __ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver

  // Check if receiver == nullptr and go the slow path.
  __ cmpdi(CCR0, R3_RET, 0);
  __ beq(CCR0, slow_path);

  __ load_heap_oop(R3_RET, referent_offset, R3_RET,
                   /* non-volatile temp */ R31, R11_scratch1,
                   MacroAssembler::PRESERVATION_FRAME_LR,
                   ON_WEAK_OOP_REF);

  // Generate the G1 pre-barrier code to log the value of
  // the referent field in an SATB buffer. Note with
  // these parameters the pre-barrier does not generate
  // the load of the previous value.

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);

  __ blr();

  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
  return entry;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();
  // Throw exception.
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  __ empty_expression_stack();
  // R4_ARG2 already contains the array.
  // Index is in R17_tos.
  __ mr(R5_ARG3, R17_tos);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R4_ARG2, R5_ARG3);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Load exception object.
  // Thread will be loaded to R3_ARG1.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
#ifdef ASSERT
  // Above call must not return here since exception pending.
  __ should_not_reach_here();
#endif
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  address entry = __ pc();
  //__ untested("generate_exception_handler_common");
  Register Rexception = R17_tos;

  // Expression stack must be empty before entering the VM if an exception happened.
  __ empty_expression_stack();

  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
  if (pass_oop) {
    __ mr(R5_ARG3, Rexception);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception));
  } else {
    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception));
  }

  // Throw exception.
  __ mr(R3_ARG1, Rexception);
  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}

// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Move the value out of the return register back to the TOS cache of current frame.
  switch (state) {
    case ltos:
    case btos:
    case ztos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
    case ftos:
    case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
    case vtos: break;                           // Nothing to do, this was a void return.
    default  : ShouldNotReachHere();
  }

  __ restore_interpreter_state(R11_scratch1, false /*bcp_and_mdx_only*/, true /*restore_top_frame_sp*/);

  // Compiled code destroys templateTableBase, reload.
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);

  if (state == atos) {
    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
  }

  const Register cache = R11_scratch1;
  const Register size  = R12_scratch2;
  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, size /* tmp */);
    __ lhz(size, in_bytes(ResolvedIndyEntry::num_parameters_offset()), cache);
  } else {
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, size /* tmp */);
    __ lhz(size, in_bytes(ResolvedMethodEntry::num_parameters_offset()), cache);
  }
  __ sldi(size, size, Interpreter::logStackElementSize);
  __ add(R15_esp, R15_esp, size);

  __ check_and_handle_popframe(R11_scratch1);
  __ check_and_handle_earlyret(R11_scratch1);

  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();
  // If state != vtos, we're returning from a native method, which put its result
  // into the result register. So move the value out of the return register back
  // to the TOS cache of current frame.

  switch (state) {
    case ltos:
    case btos:
    case ztos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R17_tos, R3_RET); break;   // GR_RET -> TOS cache
    case ftos:
    case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
    case vtos: break;                           // Nothing to do, this was a void return.
    default  : ShouldNotReachHere();
  }

  // Load LcpoolCache @@@ should be already set!
  __ get_constant_pool_cache(R27_constPoolCache);

  // Handle a pending exception, fall through if none.
  __ check_and_forward_exception(R11_scratch1, R12_scratch2);

  // Start executing bytecodes.
  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation, R11_scratch1);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ push_cont_fastpath();
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath();
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));

  return entry;
}

// Helpers for commoning out cases in the various type of method entries.

// Increment invocation count & check for overflow.
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  // Note: In tiered we increment either counters in method or in MDO depending if we're profiling or not.
  Register Rscratch1   = R11_scratch1;
  Register Rscratch2   = R12_scratch2;
  Register R3_counters = R3_ARG1;
  Label done;

  const int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    const Register Rmdo = R3_counters;
    __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
    __ cmpdi(CCR0, Rmdo, 0);
    __ beq(CCR0, no_mdo);

    // Increment invocation counter in the MDO.
    const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
    __ lwz(Rscratch2, mdo_ic_offs, Rmdo);
    __ lwz(Rscratch1, in_bytes(MethodData::invoke_mask_offset()), Rmdo);
    __ addi(Rscratch2, Rscratch2, increment);
    __ stw(Rscratch2, mdo_ic_offs, Rmdo);
    __ and_(Rscratch1, Rscratch2, Rscratch1);
    __ bne(CCR0, done);
    __ b(*overflow);
  }

  // Increment counter in MethodCounters*.
  const int mo_ic_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
  __ bind(no_mdo);
  __ get_method_counters(R19_method, R3_counters, done);
  __ lwz(Rscratch2, mo_ic_offs, R3_counters);
  __ lwz(Rscratch1, in_bytes(MethodCounters::invoke_mask_offset()), R3_counters);
  __ addi(Rscratch2, Rscratch2, increment);
  __ stw(Rscratch2, mo_ic_offs, R3_counters);
  __ and_(Rscratch1, Rscratch2, Rscratch1);
  __ beq(CCR0, *overflow);

  __ bind(done);
}

// Generate code to initiate compilation on invocation counter overflow.
void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes one argument,
  // which indicates if the counter overflow occurs at a backwards branch (null bcp).
  // We pass zero in.
  // The call returns the address of the verified entry point for the method or null
  // if the compilation did not complete (either went background or bailed out).
  //
  // Unlike the C++ interpreter above: Check exceptions!
  // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.

  __ li(R4_ARG2, 0);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

  // Returns verified_entry_point or null.
  // We ignore it in any case.
  __ b(continue_entry);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
//
// Kills Rmem_frame_size, Rscratch1.
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
  Label done;
  assert_different_registers(Rmem_frame_size, Rscratch1);

  BLOCK_COMMENT("stack_overflow_check_with_compare {");
  __ sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
  __ ld(Rscratch1, thread_(stack_overflow_limit));
  __ cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
  __ bgt(CCR0/*is_stack_overflow*/, done);

  // The stack overflows. Load target address of the runtime stub and call it.
  assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "generated in wrong order");
  __ load_const_optimized(Rscratch1, (SharedRuntime::throw_StackOverflowError_entry()), R0);
  __ mtctr(Rscratch1);
  // Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame).
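  // (Sanity note: the assert block below only checks invariants; the current SP
  //  must not be above the sender SP, i.e. the frame may only have grown, and the
  //  backlink stored at the current SP must match the one at the sender SP.)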
#ifdef ASSERT
  Label frame_not_shrunk;
  __ cmpld(CCR0, R1_SP, R21_sender_SP);
  __ ble(CCR0, frame_not_shrunk);
  __ stop("frame shrunk");
  __ bind(frame_not_shrunk);
  __ ld(Rscratch1, 0, R1_SP);
  __ ld(R0, 0, R21_sender_SP);
  __ cmpd(CCR0, R0, Rscratch1);
  __ asm_assert_eq("backlink");
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP);
  __ bctr();

  __ align(32, 12);
  __ bind(done);
  BLOCK_COMMENT("} stack_overflow_check_with_compare");
}

// Lock the current method, interpreter register window must be set up!
void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
  const Register Robj_to_lock = Rscratch2;

  {
    if (!flags_preloaded) {
      __ lwz(Rflags, method_(access_flags));
    }

#ifdef ASSERT
    // Check if method needs synchronization.
    {
      Label Lok;
      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
      __ btrue(CCR0, Lok);
      __ stop("method doesn't need synchronization");
      __ bind(Lok);
    }
#endif // ASSERT
  }

  // Get synchronization object to Rscratch2.
  {
    Label Lstatic;
    Label Ldone;

    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
    __ btrue(CCR0, Lstatic);

    // Non-static case: load receiver obj from stack and we're done.
    __ ld(Robj_to_lock, R18_locals);
    __ b(Ldone);

    __ bind(Lstatic); // Static case: Lock the java mirror
    // Load mirror from interpreter frame.
    __ ld(Robj_to_lock, _abi0(callers_sp), R1_SP);
    __ ld(Robj_to_lock, _ijava_state_neg(mirror), Robj_to_lock);

    __ bind(Ldone);
    __ verify_oop(Robj_to_lock);
  }

  // Got the oop to lock => execute!
  __ add_monitor_to_stack(true, Rscratch1, R0);

  __ std(Robj_to_lock, in_bytes(BasicObjectLock::obj_offset()), R26_monitor);
  __ lock_object(R26_monitor, Robj_to_lock);
}

// Generate a fixed interpreter frame for pure interpreter
// and I2N native transition frames.
//
// Before (stack grows downwards):
//
//         |  ...         |
//         |--------------|
//         |  java arg0   |
//         |  ...         |
//         |  java argn   |
//         |              |  <-  R15_esp
//         |              |
//         |--------------|
//         |  abi_112     |
//         |              |  <-  R1_SP
//         |==============|
//
//
// After:
//
//         |  ...         |
//         |  java arg0   |  <-  R18_locals
//         |  ...         |
//         |  java argn   |
//         |--------------|
//         |              |
//         |  java locals |
//         |              |
//         |--------------|
//         |  abi_48      |
//         |==============|
//         |              |
//         |  istate      |
//         |              |
//         |--------------|
//         |  monitor     |  <-  R26_monitor
//         |--------------|
//         |              |  <-  R15_esp
//         |  expression  |
//         |  stack       |
//         |              |
//         |--------------|
//         |              |
//         |  abi_112     |  <-  R1_SP
//         |==============|
//
// The topmost frame needs an ABI space of 112 bytes. This space is needed,
// since we call to C. The C function may spill its arguments to the caller
// frame. When we call to Java, we don't need these spill slots. In order to save
// space on the stack, we resize the caller. However, java locals reside in
// the caller frame and the frame has to be increased. The frame_size for the
// current frame was calculated based on max_stack as size for the expression
// stack. At the call, just a part of the expression stack might be used.
// We don't want to waste this space and cut the frame back accordingly.
// The resulting amount for resizing is calculated as follows:
// resize =   (number_of_locals - number_of_arguments) * slot_size
//          + (R1_SP - R15_esp) + 48
//
// The size for the callee frame is calculated:
// framesize = 112 + max_stack + monitor + state_size
//
// maxstack:   Max number of slots on the expression stack, loaded from the method.
// monitor:    We statically reserve room for one monitor object.
// state_size: We save the current state of the interpreter to this area.
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
  Register Rparent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
           Rtop_frame_size      = R7_ARG5,
           Rconst_method        = R8_ARG6,
           Rconst_pool          = R9_ARG7,
           Rmirror              = R10_ARG8;

  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, Rparent_frame_resize, Rtop_frame_size,
                             Rconst_method, Rconst_pool);

  __ ld(Rconst_method, method_(const));
  __ lhz(Rsize_of_parameters /* number of params */,
         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
  if (native_call) {
    // If we're calling a native method, we reserve space for the worst-case signature
    // handler varargs vector, which is max(Argument::n_int_register_parameters_c, parameter_count+2).
    // We add two slots to the parameter_count, one for the jni
    // environment and one for a possible native mirror.
    Label skip_native_calculate_max_stack;
    __ addi(Rtop_frame_size, Rsize_of_parameters, 2);
    __ cmpwi(CCR0, Rtop_frame_size, Argument::n_int_register_parameters_c);
    __ bge(CCR0, skip_native_calculate_max_stack);
    __ li(Rtop_frame_size, Argument::n_int_register_parameters_c);
    __ bind(skip_native_calculate_max_stack);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(Rtop_frame_size, Rtop_frame_size, Interpreter::logStackElementSize);
    __ sub(Rparent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
  } else {
    __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
    __ lhz(Rtop_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
    __ sub(Rparent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    __ sldi(Rtop_frame_size, Rtop_frame_size, Interpreter::logStackElementSize);
    __ add(Rparent_frame_resize, Rparent_frame_resize, R11_scratch1);
  }

  // Compute top frame size.
  __ addi(Rtop_frame_size, Rtop_frame_size, frame::top_ijava_frame_abi_size + frame::ijava_state_size);

  // Cut back area between esp and max_stack.
  __ addi(Rparent_frame_resize, Rparent_frame_resize, frame::parent_ijava_frame_abi_size - Interpreter::stackElementSize);

  __ round_to(Rtop_frame_size, frame::alignment_in_bytes);
  __ round_to(Rparent_frame_resize, frame::alignment_in_bytes);
  // Rparent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.

  if (!native_call) {
    // Stack overflow check.
    // Native calls don't need the stack size check since they have no
    // expression stack and the arguments are already on the stack and
    // we only add a handful of words to the stack.
    __ add(R11_scratch1, Rparent_frame_resize, Rtop_frame_size);
    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
  }

  // Set up interpreter state registers.

  __ add(R18_locals, R15_esp, Rsize_of_parameters);
  __ ld(Rconst_pool, in_bytes(ConstMethod::constants_offset()), Rconst_method);
  __ ld(R27_constPoolCache, ConstantPool::cache_offset(), Rconst_pool);

  // Set method data pointer.
  if (ProfileInterpreter) {
    Label zero_continue;
    __ ld(R28_mdx, method_(method_data));
    __ cmpdi(CCR0, R28_mdx, 0);
    __ beq(CCR0, zero_continue);
    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
    __ bind(zero_continue);
  }

  if (native_call) {
    __ li(R14_bcp, 0); // Must initialize.
  } else {
    __ addi(R14_bcp, Rconst_method, in_bytes(ConstMethod::codes_offset()));
  }

  // Resize parent frame.
  __ mflr(R12_scratch2);
  __ neg(Rparent_frame_resize, Rparent_frame_resize);
  __ resize_frame(Rparent_frame_resize, R11_scratch1);
  __ std(R12_scratch2, _abi0(lr), R1_SP);

  // Get mirror and store it in the frame as GC root for this Method*.
  __ ld(Rmirror, ConstantPool::pool_holder_offset(), Rconst_pool);
  __ ld(Rmirror, in_bytes(Klass::java_mirror_offset()), Rmirror);
  __ resolve_oop_handle(Rmirror, R11_scratch1, R12_scratch2, MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS);

  __ addi(R26_monitor, R1_SP, -frame::ijava_state_size);
  __ addi(R15_esp, R26_monitor, -Interpreter::stackElementSize);

  // Store values.
  __ std(R19_method, _ijava_state_neg(method), R1_SP);
  __ std(Rmirror, _ijava_state_neg(mirror), R1_SP);
  __ sub(R12_scratch2, R18_locals, R1_SP);
  __ srdi(R12_scratch2, R12_scratch2, Interpreter::logStackElementSize);
  // Store relativized R18_locals, see frame::interpreter_frame_locals().
  __ std(R12_scratch2, _ijava_state_neg(locals), R1_SP);
  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);

  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
  // be found in the frame after save_interpreter_state is done. This is always true
  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
  // (Enhanced Stack Trace).
  // The signal handler does not save the interpreter state into the frame.

  // We have to initialize some of these frame slots for native calls (accessed by GC).
  // Also initialize them for non-native calls for better tool support (even though
  // you may not get the most recent version as described above).
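  // (Note: the monitors and esp slots are stored in relativized form, i.e. as word
  //  offsets from R1_SP; the empty monitor area therefore starts at
  //  -(frame::ijava_state_size / wordSize) slots, matching R26_monitor set up above.)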
  __ li(R0, 0);
  __ li(R12_scratch2, -(frame::ijava_state_size / wordSize));
  __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
  __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
  if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
  __ sub(R12_scratch2, R15_esp, R1_SP);
  __ sradi(R12_scratch2, R12_scratch2, Interpreter::logStackElementSize);
  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP); // only used for native_call

  // Store sender's SP and this frame's top SP.
  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
  __ neg(R12_scratch2, Rtop_frame_size);
  __ sradi(R12_scratch2, R12_scratch2, Interpreter::logStackElementSize);
  // Store relativized top_frame_sp
  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);

  // Push top frame.
  __ push_frame(Rtop_frame_size, R11_scratch1);
}

// End of helpers

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

  // Decide what to do: Use same platform specific instructions and runtime calls as compilers.
  bool use_instruction = false;
  address runtime_entry = nullptr;
  int num_args = 1;
  bool double_precision = true;

  // PPC64 specific:
  switch (kind) {
    case Interpreter::java_lang_math_sqrt: use_instruction = VM_Version::has_fsqrt(); break;
    case Interpreter::java_lang_math_abs:  use_instruction = true; break;
    case Interpreter::java_lang_math_fmaF:
    case Interpreter::java_lang_math_fmaD: use_instruction = UseFMA; break;
    default: break; // Fall back to runtime call.
  }

  switch (kind) {
    case Interpreter::java_lang_math_sin  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);   break;
    case Interpreter::java_lang_math_cos  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);   break;
    case Interpreter::java_lang_math_tan  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);   break;
    case Interpreter::java_lang_math_abs  : /* run interpreted */ break;
    case Interpreter::java_lang_math_sqrt : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);  break;
    case Interpreter::java_lang_math_log  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);   break;
    case Interpreter::java_lang_math_log10: runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10); break;
    case Interpreter::java_lang_math_pow  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow); num_args = 2; break;
    case Interpreter::java_lang_math_exp  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);   break;
    case Interpreter::java_lang_math_fmaF : /* run interpreted */ num_args = 3; double_precision = false; break;
    case Interpreter::java_lang_math_fmaD : /* run interpreted */ num_args = 3; break;
    default: ShouldNotReachHere();
  }

  // Use normal entry if neither instruction nor runtime call is used.
  if (!use_instruction && runtime_entry == nullptr) return nullptr;

  address entry = __ pc();

  // Load arguments
  assert(num_args <= 13, "passed in registers");
  if (double_precision) {
    int offset = (2 * num_args - 1) * Interpreter::stackElementSize;
    for (int i = 0; i < num_args; ++i) {
      __ lfd(as_FloatRegister(F1_ARG1->encoding() + i), offset, R15_esp);
      offset -= 2 * Interpreter::stackElementSize;
    }
  } else {
    int offset = num_args * Interpreter::stackElementSize;
    for (int i = 0; i < num_args; ++i) {
      __ lfs(as_FloatRegister(F1_ARG1->encoding() + i), offset, R15_esp);
      offset -= Interpreter::stackElementSize;
    }
  }

  if (use_instruction) {
    switch (kind) {
      case Interpreter::java_lang_math_sqrt: __ fsqrt(F1_RET, F1);          break;
      case Interpreter::java_lang_math_abs:  __ fabs(F1_RET, F1);           break;
      case Interpreter::java_lang_math_fmaF: __ fmadds(F1_RET, F1, F2, F3); break;
      case Interpreter::java_lang_math_fmaD: __ fmadd(F1_RET, F1, F2, F3);  break;
      default: ShouldNotReachHere();
    }
  } else {
    // Comment: Can use tail call if the unextended frame is always C ABI compliant:
    //__ load_const_optimized(R12_scratch2, runtime_entry, R0);
    //__ call_c_and_return_to_caller(R12_scratch2);

    // Push a new C frame and save LR.
    __ save_LR(R0);
    __ push_frame_reg_args(0, R11_scratch1);

    __ call_VM_leaf(runtime_entry);

    // Pop the C frame and restore LR.
    __ pop_frame();
    __ restore_LR(R0);
  }

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
  __ blr();

  __ flush();

  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  const size_t page_size = os::vm_page_size();
  const int n_shadow_pages = StackOverflow::stack_shadow_zone_size() / page_size;
  const int start_page = native_call ? n_shadow_pages : 1;
  BLOCK_COMMENT("bang_stack_shadow_pages:");
  for (int pages = start_page; pages <= n_shadow_pages; pages++) {
    __ bang_stack_with_offset(pages*page_size);
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
//
// On entry:
//   R19_method - method
//   R16_thread - JavaThread*
//   R15_esp    - intptr_t* sender tos
//
//   abstract stack (grows up)
//     [  IJava (caller of JNI callee)  ]  <-- ASP
//        ...
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

  address entry = __ pc();

  const bool inc_counter = UseCompiler || CountCompiledCalls;

  // -----------------------------------------------------------------------------
  // Allocate a new frame that represents the native callee (i2n frame).
  // This is not a full-blown interpreter frame, but in particular, the
  // following registers are valid after this:
  // - R19_method
  // - R18_locals (points to start of arguments to native function)
  //
  //   abstract stack (grows up)
  //     [  IJava (caller of JNI callee)  ]  <-- ASP
  //        ...

  const Register signature_handler_fd = R11_scratch1;
  const Register pending_exception    = R0;
  const Register result_handler_addr  = R31;
  const Register native_method_fd     = R11_scratch1;
  const Register access_flags         = R22_tmp2;
  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
  const Register sync_state           = R12_scratch2;
  const Register sync_state_addr      = sync_state;   // Address is dead after use.
  const Register suspend_flags        = R11_scratch1;

  //=============================================================================
  // Allocate new frame and initialize interpreter state.

  Label exception_return;
  Label exception_return_sync_check;
  Label stack_overflow_return;

  // Generate new interpreter state and jump to stack_overflow_return in case of
  // a stack overflow.
  //generate_compute_interpreter_state(stack_overflow_return);

  Register size_of_parameters = R22_tmp2;

  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);

  //=============================================================================
  // Increment invocation counter. On overflow, entry to JNI method
  // will be compiled.
  Label invocation_counter_overflow, continue_after_compile;
  if (inc_counter) {
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of synchronized methods which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // runtime, exception handling i.e. unlock_if_synchronized_method will
      // check this thread local flag.
      // This flag has two effects, one is to force an unwind in the topmost
      // interpreter frame and not perform an unlock while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
    generate_counter_incr(&invocation_counter_overflow);

    BIND(continue_after_compile);
  }

  bang_stack_shadow_pages(true);

  if (inc_counter) {
    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // access_flags = method->access_flags();
  // Load access flags.
  assert(access_flags->is_nonvolatile(),
         "access_flags must be in a non-volatile register");
  // Type check.
  assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwz(access_flags, method_(access_flags));

  // We don't want to reload R19_method and access_flags after calls
  // to some helper functions.
  assert(R19_method->is_nonvolatile(),
         "R19_method must be a non-volatile register");

  // Check for synchronized methods. Must happen AFTER invocation counter
  // check, so method is not locked if counter overflows.

  if (synchronized) {
    lock_method(access_flags, R11_scratch1, R12_scratch2, true);

    // Update monitor in state.
    __ ld(R11_scratch1, 0, R1_SP);
    __ sub(R12_scratch2, R26_monitor, R11_scratch1);
    __ sradi(R12_scratch2, R12_scratch2, Interpreter::logStackElementSize);
    __ std(R12_scratch2, _ijava_state_neg(monitors), R11_scratch1);
  }

  // jvmti/jvmpi support
  __ notify_method_entry();

  //=============================================================================
  // Get and call the signature handler.

  __ ld(signature_handler_fd, method_(signature_handler));
  Label call_signature_handler;

  __ cmpdi(CCR0, signature_handler_fd, 0);
  __ bne(CCR0, call_signature_handler);

  // Method has never been called. Either generate a specialized
  // handler or point to the slow one.
  //
  // Pass parameter 'false' to avoid exception check in call_VM.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);

  // Check for an exception while looking up the target method. If we
  // incurred one, bail.
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CCR0, pending_exception, 0);
  __ bne(CCR0, exception_return_sync_check); // Has pending exception.

  // Reload signature handler, it may have been created/assigned in the meanwhile.
  __ ld(signature_handler_fd, method_(signature_handler));
  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).

  BIND(call_signature_handler);

  // Before we call the signature handler we push a new frame to
  // protect the interpreter frame volatile registers when we return
  // from jni but before we can get back to Java.

  // First set the frame anchor while the SP/FP registers are
  // convenient and the slow signature handler can use this same frame
  // anchor.

  // We have a TOP_IJAVA_FRAME here, which belongs to us.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Now the interpreter frame (and its call chain) have been
  // invalidated and flushed. We are now protected against eager
  // being enabled in native code. Even if it goes eager the
  // registers will be reloaded as clean and we will invalidate after
  // the call so no spurious flush should be possible.

  // Call signature handler and pass locals address.
  //
  // Our signature handlers copy required arguments to the C stack
  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
  __ mr(R3_ARG1, R18_locals);
#if !defined(ABI_ELFv2)
  __ ld(signature_handler_fd, 0, signature_handler_fd);
#endif

  __ call_stub(signature_handler_fd);

  // Remove the register parameter varargs slots we allocated in
  // compute_interpreter_state. SP+16 ends up pointing to the ABI
  // outgoing argument area.
  //
  // Not needed on PPC64.
  //__ add(SP, SP, Argument::n_int_register_parameters_c*BytesPerWord);

  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
  // Save across call to native method.
  __ mr(result_handler_addr, R3_RET);

  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.

  // Set up fixed parameters and call the native method.
  // If the method is static, get mirror into R4_ARG2.
  {
    Label method_is_not_static;
    // Access_flags is non-volatile and still valid, no need to reload it.

    // Restore access flags.
    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
    __ bfalse(CCR0, method_is_not_static);

    __ ld(R11_scratch1, _abi0(callers_sp), R1_SP);
    // Load mirror from interpreter frame.
    __ ld(R12_scratch2, _ijava_state_neg(mirror), R11_scratch1);
    // R4_ARG2 = &state->_oop_temp;
    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
    __ std(R12_scratch2/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
    BIND(method_is_not_static);
  }

  // At this point, arguments have been copied off the stack into
  // their JNI positions. Oops are boxed in-place on the stack, with
  // handles copied to arguments. The result handler address is in a
  // register.

  // Pass JNIEnv address as first parameter.
  __ addir(R3_ARG1, thread_(jni_environment));

  // Load the native_method entry before we change the thread state.
  __ ld(native_method_fd, method_(native_function));

  //=============================================================================
  // Transition from _thread_in_Java to _thread_in_native. As soon as
  // we make this change the safepoint code needs to be certain that
  // the last Java frame we established is good. The pc in that frame
  // just needs to be near here not an actual return address.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0, _thread_in_native);
  __ release();

  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));

  //=============================================================================
  // Call the native method. Argument registers must not have been
  // overwritten since "__ call_stub(signature_handler);" (except for
  // ARG1 and ARG2 for static methods).
  __ call_c(native_method_fd);

  __ li(R0, 0);
  __ ld(R11_scratch1, 0, R1_SP);
  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset

  // Note: C++ interpreter needs the following here:
  // The frame_manager_lr field, which we use for setting the last
  // java frame, gets overwritten by the signature handler. Restore
  // it now.
  //__ get_PC_trash_LR(R11_scratch1);
  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);

  // Because of GC R19_method may no longer be valid.

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after
  // blocking.

  //=============================================================================
  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
  // in _thread_in_native state, loads _not_synchronized and is
  // preempted. VM thread changes sync state to synchronizing and
  // suspends threads for GC. Thread A is resumed to finish this
  // native method, but doesn't block here since it didn't see any
  // synchronization in progress, and escapes.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_native_trans);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (!UseSystemMemoryBarrier) {
    __ fence();
  }

  // Now before we return to java we must look for a current safepoint
  // (a new safepoint can not start since we entered native_trans).
  // We must check here because a current safepoint could be modifying
  // the callers registers right this moment.

  // Acquire isn't strictly necessary here because of the fence, but
  // sync_state is declared to be volatile, so we do it anyway
  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).

  Label do_safepoint, sync_check_done;
  // No synchronization in progress nor yet synchronized.
  __ safepoint_poll(do_safepoint, sync_state, true /* at_return */, false /* in_nmethod */);

  // Not suspended.
  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
  __ lwz(suspend_flags, thread_(suspend_flags));
  __ cmpwi(CCR1, suspend_flags, 0);
  __ beq(CCR1, sync_check_done);

  __ bind(do_safepoint);
  __ isync();
  // Block. We do the call directly and leave the current
  // last_Java_frame setup undisturbed. We must save any possible
  // native result across the call. No oop is present.

  __ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
            relocInfo::none);
#else
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
            relocInfo::none);
#endif

  __ bind(sync_check_done);

  //=============================================================================
  // <<<<<< Back in Interpreter Frame >>>>>

  // We are in thread_in_native_trans here and back in the normal
  // interpreter frame. We don't have to do anything special about
  // safepoints and we can switch to Java mode anytime we are ready.

  // Note: frame::interpreter_frame_result has a dependency on how the
  // method result is saved across the call to post_method_exit. For
  // native methods it assumes that the non-FPU/non-void result is
  // saved in _native_lresult and a FPU result in _native_fresult. If
  // this changes then the interpreter_frame_result implementation
  // will need to be updated too.
1490 1491 // On PPC64, we have stored the result directly after the native call. 1492 1493 //============================================================================= 1494 // Back in Java 1495 1496 // We use release_store_fence to update values like the thread state, where 1497 // we don't want the current thread to continue until all our prior memory 1498 // accesses (including the new thread state) are visible to other threads. 1499 __ li(R0/*thread_state*/, _thread_in_Java); 1500 __ lwsync(); // Acquire safepoint and suspend state, release thread state. 1501 __ stw(R0/*thread_state*/, thread_(thread_state)); 1502 1503 if (CheckJNICalls) { 1504 // clear_pending_jni_exception_check 1505 __ load_const_optimized(R0, 0L); 1506 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread); 1507 } 1508 1509 __ reset_last_Java_frame(); 1510 1511 // Jvmdi/jvmpi support. Whether we've got an exception pending or 1512 // not, and whether unlocking throws an exception or not, we notify 1513 // on native method exit. If we do have an exception, we'll end up 1514 // in the caller's context to handle it, so if we don't do the 1515 // notify here, we'll drop it on the floor. 1516 __ notify_method_exit(true/*native method*/, 1517 ilgl /*illegal state (not used for native methods)*/, 1518 InterpreterMacroAssembler::NotifyJVMTI, 1519 false /*check_exceptions*/); 1520 1521 //============================================================================= 1522 // Handle exceptions 1523 1524 if (synchronized) { 1525 __ unlock_object(R26_monitor); // Can also unlock methods. 1526 } 1527 1528 // Reset active handles after returning from native. 1529 // thread->active_handles()->clear(); 1530 __ ld(active_handles, thread_(active_handles)); 1531 // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size"); 1532 __ li(R0, 0); 1533 __ stw(R0, in_bytes(JNIHandleBlock::top_offset()), active_handles); 1534 1535 Label exception_return_sync_check_already_unlocked; 1536 __ ld(R0/*pending_exception*/, thread_(pending_exception)); 1537 __ cmpdi(CCR0, R0/*pending_exception*/, 0); 1538 __ bne(CCR0, exception_return_sync_check_already_unlocked); 1539 1540 //----------------------------------------------------------------------------- 1541 // No exception pending. 1542 1543 // Move native method result back into proper registers and return. 1544 // Invoke result handler (may unbox/promote). 1545 __ ld(R11_scratch1, 0, R1_SP); 1546 __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1); 1547 __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1); 1548 __ call_stub(result_handler_addr); 1549 1550 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2); 1551 1552 // Must use the return pc which was loaded from the caller's frame 1553 // as the VM uses return-pc-patching for deoptimization. 1554 __ mtlr(R0); 1555 __ blr(); 1556 1557 //----------------------------------------------------------------------------- 1558 // An exception is pending. We call into the runtime only if the 1559 // caller was not interpreted. If it was interpreted the 1560 // interpreter will do the correct thing. If it isn't interpreted 1561 // (call stub/compiled code) we will change our return and continue. 1562 1563 BIND(exception_return_sync_check); 1564 1565 if (synchronized) { 1566 __ unlock_object(R26_monitor); // Can also unlock methods. 
1567 } 1568 BIND(exception_return_sync_check_already_unlocked); 1569 1570 const Register return_pc = R31; 1571 1572 __ ld(return_pc, 0, R1_SP); 1573 __ ld(return_pc, _abi0(lr), return_pc); 1574 1575 // Get the address of the exception handler. 1576 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 1577 R16_thread, 1578 return_pc /* return pc */); 1579 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2); 1580 1581 // Load the PC of the exception handler into LR. 1582 __ mtlr(R3_RET); 1583 1584 // Load exception into R3_ARG1 and clear pending exception in thread. 1585 __ ld(R3_ARG1/*exception*/, thread_(pending_exception)); 1586 __ li(R4_ARG2, 0); 1587 __ std(R4_ARG2, thread_(pending_exception)); 1588 1589 // Load the original return pc into R4_ARG2. 1590 __ mr(R4_ARG2/*issuing_pc*/, return_pc); 1591 1592 // Return to exception handler. 1593 __ blr(); 1594 1595 //============================================================================= 1596 // Counter overflow. 1597 1598 if (inc_counter) { 1599 // Handle invocation counter overflow. 1600 __ bind(invocation_counter_overflow); 1601 1602 generate_counter_overflow(continue_after_compile); 1603 } 1604 1605 return entry; 1606 } 1607 1608 // Generic interpreted method entry to (asm) interpreter. 1609 // 1610 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { 1611 bool inc_counter = UseCompiler || CountCompiledCalls; 1612 address entry = __ pc(); 1613 // Generate the code to allocate the interpreter stack frame. 1614 Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame. 1615 Rsize_of_locals = R5_ARG3; // Written by generate_fixed_frame. 1616 1617 // Does also a stack check to assure this frame fits on the stack. 1618 generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals); 1619 1620 // -------------------------------------------------------------------------- 1621 // Zero out non-parameter locals. 1622 // Note: *Always* zero out non-parameter locals as Sparc does. It's not 1623 // worth to ask the flag, just do it. 1624 Register Rslot_addr = R6_ARG4, 1625 Rnum = R7_ARG5; 1626 Label Lno_locals, Lzero_loop; 1627 1628 // Set up the zeroing loop. 1629 __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals); 1630 __ subf(Rslot_addr, Rsize_of_parameters, R18_locals); 1631 __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize); 1632 __ beq(CCR0, Lno_locals); 1633 __ li(R0, 0); 1634 __ mtctr(Rnum); 1635 1636 // The zero locals loop. 1637 __ bind(Lzero_loop); 1638 __ std(R0, 0, Rslot_addr); 1639 __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize); 1640 __ bdnz(Lzero_loop); 1641 1642 __ bind(Lno_locals); 1643 1644 // -------------------------------------------------------------------------- 1645 // Counter increment and overflow check. 1646 Label invocation_counter_overflow; 1647 Label continue_after_compile; 1648 if (inc_counter || ProfileInterpreter) { 1649 1650 Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1; 1651 if (synchronized) { 1652 // Since at this point in the method invocation the exception handler 1653 // would try to exit the monitor of synchronized methods which hasn't 1654 // been entered yet, we set the thread local variable 1655 // _do_not_unlock_if_synchronized to true. If any exception was thrown by 1656 // runtime, exception handling i.e. unlock_if_synchronized_method will 1657 // check this thread local flag. 
1658 // This flag has two effects, one is to force an unwind in the topmost 1659 // interpreter frame and not perform an unlock while doing so. 1660 __ li(R0, 1); 1661 __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread); 1662 } 1663 1664 // Argument and return type profiling. 1665 __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4); 1666 1667 // Increment invocation counter and check for overflow. 1668 if (inc_counter) { 1669 generate_counter_incr(&invocation_counter_overflow); 1670 } 1671 1672 __ bind(continue_after_compile); 1673 } 1674 1675 bang_stack_shadow_pages(false); 1676 1677 if (inc_counter || ProfileInterpreter) { 1678 // Reset the _do_not_unlock_if_synchronized flag. 1679 if (synchronized) { 1680 __ li(R0, 0); 1681 __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread); 1682 } 1683 } 1684 1685 // -------------------------------------------------------------------------- 1686 // Locking of synchronized methods. Must happen AFTER invocation_counter 1687 // check and stack overflow check, so method is not locked if overflows. 1688 if (synchronized) { 1689 lock_method(R3_ARG1, R4_ARG2, R5_ARG3); 1690 } 1691 #ifdef ASSERT 1692 else { 1693 Label Lok; 1694 __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method); 1695 __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED); 1696 __ asm_assert_eq("method needs synchronization"); 1697 __ bind(Lok); 1698 } 1699 #endif // ASSERT 1700 1701 // -------------------------------------------------------------------------- 1702 // JVMTI support 1703 __ notify_method_entry(); 1704 1705 // -------------------------------------------------------------------------- 1706 // Start executing instructions. 1707 __ dispatch_next(vtos); 1708 1709 // -------------------------------------------------------------------------- 1710 if (inc_counter) { 1711 // Handle invocation counter overflow. 1712 __ bind(invocation_counter_overflow); 1713 generate_counter_overflow(continue_after_compile); 1714 } 1715 return entry; 1716 } 1717 1718 // CRC32 Intrinsics. 1719 // 1720 // Contract on scratch and work registers. 1721 // ======================================= 1722 // 1723 // On ppc, the register set {R2..R12} is available in the interpreter as scratch/work registers. 1724 // You should, however, keep in mind that {R3_ARG1..R10_ARG8} is the C-ABI argument register set. 1725 // You can't rely on these registers across calls. 1726 // 1727 // The generators for CRC32_update and for CRC32_updateBytes use the 1728 // scratch/work register set internally, passing the work registers 1729 // as arguments to the MacroAssembler emitters as required. 1730 // 1731 // R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments. 1732 // Their contents is not constant but may change according to the requirements 1733 // of the emitted code. 1734 // 1735 // All other registers from the scratch/work register set are used "internally" 1736 // and contain garbage (i.e. unpredictable values) once blr() is reached. 1737 // Basically, only R3_RET contains a defined value which is the function result. 1738 // 1739 /** 1740 * Method entry for static native methods: 1741 * int java.util.zip.CRC32.update(int crc, int b) 1742 */ 1743 address TemplateInterpreterGenerator::generate_CRC32_update_entry() { 1744 assert(UseCRC32Intrinsics, "this intrinsic is not supported"); 1745 address start = __ pc(); // Remember stub start address (is rtn value). 
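  // Overview of the fast path generated below (a summary, not additional emitted
  // code): poll for a safepoint and branch to slow_path if one is pending; otherwise
  // compute the CRC inline on the expression-stack arguments, restore the caller's
  // SP and return via blr without building an interpreter frame. The slow path
  // simply jumps to the vanilla native entry.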
  Label slow_path;

  // Safepoint check
  const Register sync_state = R11_scratch1;
  __ safepoint_poll(slow_path, sync_state, false /* at_return */, false /* in_nmethod */);

  // We neither generate a local frame nor align the stack, because
  // we don't even call stub code (we generate the code inline)
  // and there is no safepoint on this path.

  // Load java parameters.
  // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
  const Register argP  = R15_esp;
  const Register crc   = R3_ARG1;  // crc value
  const Register data  = R4_ARG2;
  const Register table = R5_ARG3;  // address of crc32 table

  BLOCK_COMMENT("CRC32_update {");

  // Arguments are reversed on java expression stack
#ifdef VM_LITTLE_ENDIAN
  int data_offs = 0+1*wordSize;  // (stack) address of byte value. Emitter expects address, not value.
                                 // Being passed as an int, the single byte is at offset +0.
#else
  int data_offs = 3+1*wordSize;  // (stack) address of byte value. Emitter expects address, not value.
                                 // Being passed from java as an int, the single byte is at offset +3.
#endif
  __ lwz(crc, 2*wordSize, argP);  // Current crc state, zero extend to 64 bit to have a clean register.
  __ lbz(data, data_offs, argP);  // Byte from buffer, zero-extended.
  __ load_const_optimized(table, StubRoutines::crc_table_addr(), R0);
  __ kernel_crc32_singleByteReg(crc, data, table, true);

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
  __ blr();

  // Generate a vanilla native entry as the slow path.
  BLOCK_COMMENT("} CRC32_update");
  BIND(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
  return start;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  assert(UseCRC32Intrinsics, "this intrinsic is not supported");
  address start = __ pc();  // Remember stub start address (is rtn value).
  Label slow_path;

  // Safepoint check
  const Register sync_state = R11_scratch1;
  __ safepoint_poll(slow_path, sync_state, false /* at_return */, false /* in_nmethod */);

  // We neither generate a local frame nor align the stack, because
  // we don't even call stub code (we generate the code inline)
  // and there is no safepoint on this path.

  // Load parameters.
  // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
  const Register argP    = R15_esp;
  const Register crc     = R3_ARG1;  // crc value
  const Register data    = R4_ARG2;  // address of java byte array
  const Register dataLen = R5_ARG3;  // source data len
  const Register tmp     = R11_scratch1;

  // Arguments are reversed on java expression stack.
  // Calculate address of start element.
  if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer direct".
    BLOCK_COMMENT("CRC32_updateByteBuffer {");
    // crc     @ (SP + 5W) (32bit)
    // buf     @ (SP + 3W) (64bit ptr to long array)
    // off     @ (SP + 2W) (32bit)
    // dataLen @ (SP + 1W) (32bit)
    // data = buf + off
    __ ld( data, 3*wordSize, argP);     // start of byte buffer
    __ lwa( tmp, 2*wordSize, argP);     // byte buffer offset
    __ lwa( dataLen, 1*wordSize, argP); // #bytes to process
    __ lwz( crc, 5*wordSize, argP);     // current crc state
    __ add( data, data, tmp);           // Add byte buffer offset.
  } else {                              // Used for "updateBytes update".
    BLOCK_COMMENT("CRC32_updateBytes {");
    // crc     @ (SP + 4W) (32bit)
    // buf     @ (SP + 3W) (64bit ptr to byte array)
    // off     @ (SP + 2W) (32bit)
    // dataLen @ (SP + 1W) (32bit)
    // data = buf + off + base_offset
    __ ld( data, 3*wordSize, argP);     // start of byte buffer
    __ lwa( tmp, 2*wordSize, argP);     // byte buffer offset
    __ lwa( dataLen, 1*wordSize, argP); // #bytes to process
    __ add( data, data, tmp);           // add byte buffer offset
    __ lwz( crc, 4*wordSize, argP);     // current crc state
    __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
  }

  __ crc32(crc, data, dataLen, R2, R6, R7, R8, R9, R10, R11, R12, false);

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
  __ blr();

  // Generate a vanilla native entry as the slow path.
  BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
  BIND(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
  return start;
}


/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(           int crc, byte[] b,  int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native.
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
 **/
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  assert(UseCRC32CIntrinsics, "this intrinsic is not supported");
  address start = __ pc();  // Remember stub start address (is rtn value).

  // We neither generate a local frame nor align the stack, because
  // we don't even call stub code (we generate the code inline)
  // and there is no safepoint on this path.

  // Load parameters.
  // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
  const Register argP    = R15_esp;
  const Register crc     = R3_ARG1;  // crc value
  const Register data    = R4_ARG2;  // address of java byte array
  const Register dataLen = R5_ARG3;  // source data len
  const Register tmp     = R11_scratch1;

  // Arguments are reversed on java expression stack.
  // Calculate address of start element.
  if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateDirectByteBuffer".
    BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
    // crc     @ (SP + 5W) (32bit)
    // buf     @ (SP + 3W) (64bit ptr to long array)
    // off     @ (SP + 2W) (32bit)
    // dataLen @ (SP + 1W) (32bit)
    // data = buf + off
    __ ld( data, 3*wordSize, argP);     // start of byte buffer
    __ lwa( tmp, 2*wordSize, argP);     // byte buffer offset
    __ lwa( dataLen, 1*wordSize, argP); // #bytes to process
    __ lwz( crc, 5*wordSize, argP);     // current crc state
    __ add( data, data, tmp);           // Add byte buffer offset.
    __ sub( dataLen, dataLen, tmp);     // (end_index - offset)
  } else {                              // Used for "updateBytes update".
    BLOCK_COMMENT("CRC32C_updateBytes {");
    // crc     @ (SP + 4W) (32bit)
    // buf     @ (SP + 3W) (64bit ptr to byte array)
    // off     @ (SP + 2W) (32bit)
    // dataLen @ (SP + 1W) (32bit)
    // data = buf + off + base_offset
    __ ld( data, 3*wordSize, argP);     // start of byte buffer
    __ lwa( tmp, 2*wordSize, argP);     // byte buffer offset
    __ lwa( dataLen, 1*wordSize, argP); // #bytes to process
    __ add( data, data, tmp);           // add byte buffer offset
    __ sub( dataLen, dataLen, tmp);     // (end_index - offset)
    __ lwz( crc, 4*wordSize, argP);     // current crc state
    __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
  }

  __ crc32(crc, data, dataLen, R2, R6, R7, R8, R9, R10, R11, R12, true);

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
  __ blr();

  BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
  return start;
}

// Not supported
address TemplateInterpreterGenerator::generate_currentThread() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }

// =============================================================================
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  Register Rexception    = R17_tos,
           Rcontinuation = R3_RET;

  // --------------------------------------------------------------------------
  // Entry point if a method returns with a pending exception (rethrow).
  Interpreter::_rethrow_exception_entry = __ pc();
  {
    __ restore_interpreter_state(R11_scratch1, false /*bcp_and_mdx_only*/, true /*restore_top_frame_sp*/);

    // Compiled code destroys templateTableBase, reload.
    __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
  }

  // Entry point if an interpreted method throws an exception (throw).
  Interpreter::_throw_exception_entry = __ pc();
  {
    __ mr(Rexception, R3_RET);

    __ verify_oop(Rexception);

    // Expression stack must be empty before entering the VM in case of an exception.
    __ empty_expression_stack();
    // Find exception handler address and preserve exception oop.
    // Call C routine to find handler and jump to it.
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
    __ mtctr(Rcontinuation);
    // Push exception for exception handler bytecodes.
    __ push_ptr(Rexception);

    // Jump to exception handler (may be the remove-activation entry!).
    __ bctr();
  }

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // bcp: exception bcp

  // --------------------------------------------------------------------------
  // JVMTI PopFrame support

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  {
    // Set the popframe_processing bit in popframe_condition indicating that we are
    // currently handling popframe, so that call_VMs that may happen later do not
    // trigger new popframe handling cycles.
    __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
    __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Empty the expression stack, as in normal exception handling.
    __ empty_expression_stack();
    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label Lcaller_not_deoptimized;
    Register return_pc = R3_ARG1;
    __ ld(return_pc, 0, R1_SP);
    __ ld(return_pc, _abi0(lr), return_pc);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
    __ cmpdi(CCR0, R3_RET, 0);
    __ bne(CCR0, Lcaller_not_deoptimized);

    // The deoptimized case.
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
    __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
    __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
    __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
    __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
    // Save these arguments.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);

    // Inform deoptimization that it is responsible for restoring these arguments.
2024 __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit); 2025 __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread); 2026 2027 // Return from the current method into the deoptimization blob. Will eventually 2028 // end up in the deopt interpreter entry, deoptimization prepared everything that 2029 // we will reexecute the call that called us. 2030 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2); 2031 __ mtlr(return_pc); 2032 __ pop_cont_fastpath(); 2033 __ blr(); 2034 2035 // The non-deoptimized case. 2036 __ bind(Lcaller_not_deoptimized); 2037 2038 // Clear the popframe condition flag. 2039 __ li(R0, 0); 2040 __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread); 2041 2042 // Get out of the current method and re-execute the call that called us. 2043 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2); 2044 __ pop_cont_fastpath(); 2045 __ restore_interpreter_state(R11_scratch1, false /*bcp_and_mdx_only*/, true /*restore_top_frame_sp*/); 2046 if (ProfileInterpreter) { 2047 __ set_method_data_pointer_for_bcp(); 2048 __ ld(R11_scratch1, 0, R1_SP); 2049 __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1); 2050 } 2051 #if INCLUDE_JVMTI 2052 Label L_done; 2053 2054 __ lbz(R11_scratch1, 0, R14_bcp); 2055 __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic); 2056 __ bne(CCR0, L_done); 2057 2058 // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. 2059 // Detect such a case in the InterpreterRuntime function and return the member name argument, or null. 2060 __ ld(R4_ARG2, 0, R18_locals); 2061 __ call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp); 2062 2063 __ cmpdi(CCR0, R4_ARG2, 0); 2064 __ beq(CCR0, L_done); 2065 __ std(R4_ARG2, wordSize, R15_esp); 2066 __ bind(L_done); 2067 #endif // INCLUDE_JVMTI 2068 __ dispatch_next(vtos); 2069 } 2070 // end of JVMTI PopFrame support 2071 2072 // -------------------------------------------------------------------------- 2073 // Remove activation exception entry. 2074 // This is jumped to if an interpreted method can't handle an exception itself 2075 // (we come from the throw/rethrow exception entry above). We're going to call 2076 // into the VM to find the exception handler in the caller, pop the current 2077 // frame and return the handler we calculated. 2078 Interpreter::_remove_activation_entry = __ pc(); 2079 { 2080 __ pop_ptr(Rexception); 2081 __ verify_oop(Rexception); 2082 __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread); 2083 2084 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true); 2085 __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false); 2086 2087 __ get_vm_result(Rexception); 2088 2089 // We are done with this activation frame; find out where to go next. 2090 // The continuation point will be an exception handler, which expects 2091 // the following registers set up: 2092 // 2093 // RET: exception oop 2094 // ARG2: Issuing PC (see generate_exception_blob()), only used if the caller is compiled. 2095 2096 Register return_pc = R31; // Needs to survive the runtime call. 
2097 __ ld(return_pc, 0, R1_SP); 2098 __ ld(return_pc, _abi0(lr), return_pc); 2099 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc); 2100 2101 // Remove the current activation. 2102 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2); 2103 __ pop_cont_fastpath(); 2104 2105 __ mr(R4_ARG2, return_pc); 2106 __ mtlr(R3_RET); 2107 __ mr(R3_RET, Rexception); 2108 __ blr(); 2109 } 2110 } 2111 2112 // JVMTI ForceEarlyReturn support. 2113 // Returns "in the middle" of a method with a "fake" return value. 2114 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { 2115 2116 Register Rscratch1 = R11_scratch1, 2117 Rscratch2 = R12_scratch2; 2118 2119 address entry = __ pc(); 2120 __ empty_expression_stack(); 2121 2122 __ load_earlyret_value(state, Rscratch1); 2123 2124 __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread); 2125 // Clear the earlyret state. 2126 __ li(R0, 0); 2127 __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1); 2128 2129 __ remove_activation(state, false, false); 2130 // Copied from TemplateTable::_return. 2131 // Restoration of lr done by remove_activation. 2132 switch (state) { 2133 // Narrow result if state is itos but result type is smaller. 2134 case btos: 2135 case ztos: 2136 case ctos: 2137 case stos: 2138 case itos: __ narrow(R17_tos); /* fall through */ 2139 case ltos: 2140 case atos: __ mr(R3_RET, R17_tos); break; 2141 case ftos: 2142 case dtos: __ fmr(F1_RET, F15_ftos); break; 2143 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need 2144 // to get visible before the reference to the object gets stored anywhere. 
               __ membar(Assembler::StoreStore); break;
    default  : ShouldNotReachHere();
  }
  __ blr();

  return entry;
} // end of ForceEarlyReturn support

//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  __ align(32, 12, 24); // align L
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

void TemplateInterpreterGenerator::count_bytecode() {
  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
  __ lwz(R12_scratch2, offs, R11_scratch1);
  __ addi(R12_scratch2, R12_scratch2, 1);
  __ stw(R12_scratch2, offs, R11_scratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
  __ lwz(R12_scratch2, offs, R11_scratch1);
  __ addi(R12_scratch2, R12_scratch2, 1);
  __ stw(R12_scratch2, offs, R11_scratch1);
}

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  //__ flush_bundle();
  address entry = __ pc();

  const char *bname = nullptr;
  uint tsize = 0;
  switch(state) {
    case ftos:
      bname = "trace_code_ftos {";
      tsize = 2;
      break;
    case btos:
      bname = "trace_code_btos {";
      tsize = 2;
      break;
    case ztos:
      bname = "trace_code_ztos {";
      tsize = 2;
      break;
    case ctos:
      bname = "trace_code_ctos {";
      tsize = 2;
      break;
    case stos:
      bname = "trace_code_stos {";
      tsize = 2;
      break;
    case itos:
      bname = "trace_code_itos {";
      tsize = 2;
      break;
    case ltos:
      bname = "trace_code_ltos {";
      tsize = 3;
      break;
    case atos:
      bname = "trace_code_atos {";
      tsize = 2;
      break;
    case vtos:
      // Note: In case of vtos, the topmost stack value could be an int or a double.
      // In case of a double (2 slots) we won't see the 2nd stack value.
      // Maybe we should simply print the topmost 3 stack slots to cope with the problem.
      bname = "trace_code_vtos {";
      tsize = 2;
      break;
    case dtos:
      bname = "trace_code_dtos {";
      tsize = 3;
      break;
    default:
      ShouldNotReachHere();
  }
  BLOCK_COMMENT(bname);

  // Support short-cut for TraceBytecodesAt.
  // Don't call into the VM if we don't want to trace, to speed things up.
  Label Lskip_vm_call;
  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
    int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
    int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
    __ ld(R11_scratch1, offs1, R11_scratch1);
    __ lwa(R12_scratch2, offs2, R12_scratch2);
    __ cmpd(CCR0, R12_scratch2, R11_scratch1);
    __ blt(CCR0, Lskip_vm_call);
  }

  __ push(state);
  // Load 2 topmost expression stack values.
  __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
  __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
  __ mflr(R31);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
  __ mtlr(R31);
  __ pop(state);

  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
    __ bind(Lskip_vm_call);
  }
  __ blr();
  BLOCK_COMMENT("} trace_code");
  return entry;
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register addr = R11_scratch1,
                 tmp  = R12_scratch2;
  // Get index, shift out old bytecode, bring in new bytecode, and store it.
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);
  int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
  __ lwz(tmp, offs1, addr);
  __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
  __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ stw(tmp, offs1, addr);

  // Bump bucket contents.
  // _counters[_index] ++;
  int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
  __ sldi(tmp, tmp, LogBytesPerInt);
  __ add(addr, tmp, addr);
  __ lwz(tmp, offs2, addr);
  __ addi(tmp, tmp, 1);
  __ stw(tmp, offs2, addr);
}

void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != nullptr,
         "entry must have been generated");

  // Note: we destroy LR here.
  __ bl(Interpreter::trace_code(t->tos_in()));
}

void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
  int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
  __ ld(R11_scratch1, offs1, R11_scratch1);
  __ lwa(R12_scratch2, offs2, R12_scratch2);
  __ cmpd(CCR0, R12_scratch2, R11_scratch1);
  __ bne(CCR0, L);
  __ illtrap();
  __ bind(L);
}

#endif // !PRODUCT