/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/powerOfTwo.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;

#define __ _masm->

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andi(esp, esp, -16);
  __ mv(c_rarg3, esp);
  // xmethod
  // xlocals
  // c_rarg3: first stack arg - wordSize
  // adjust sp

  __ addi(sp, c_rarg3, -18 * wordSize);
  __ addi(sp, sp, -2 * wordSize);
  __ sd(ra, Address(sp, 0));

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             xmethod, xlocals, c_rarg3);

  // x10: result handler

  // Stack layout:
  // sp: return address            <- sp
  //     1 garbage
  //     8 integer args (if static first is unused)
  //     1 float/double identifiers
  //     8 double args
  //     stack args                <- esp
  //     garbage
  //     expression stack bottom
  //     bcp (null)
  //     ...

  // Restore ra
  __ ld(ra, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize);

  // Do FP first so we can use c_rarg3 as temp
  __ lwu(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = g_FPArgReg[i];
    Label d, done;

    __ test_bit(t0, c_rarg3, i);
    __ bnez(t0, d);
    __ flw(r, Address(sp, (10 + i) * wordSize));
    __ j(done);
    __ bind(d);
    __ fld(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here. It will be loaded with the JNIEnv* later.
  for (int i = 1; i < Argument::n_int_register_parameters_c; i++) {
    const Register rm = g_INTArgReg[i];
    __ ld(rm, Address(sp, i * wordSize));
  }

  __ addi(sp, sp, 18 * wordSize);
  __ ret();

  return entry;
}

// Various method entries
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // xmethod: Method*
  // x19_sender_sp: sender sp
  // esp: args

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack:
  //        [ arg ] <-- esp
  //        [ arg ]
  // retaddr in ra

  address fn = nullptr;
  address entry_point = nullptr;
  Register continuation = ra;
  switch (kind) {
  case Interpreter::java_lang_math_abs:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ fabs_d(f10, f10);
    __ mv(sp, x19_sender_sp); // Restore caller's SP
    break;
  case Interpreter::java_lang_math_sqrt:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ fsqrt_d(f10, f10);
    __ mv(sp, x19_sender_sp);
    break;
  case Interpreter::java_lang_math_sin :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dsin() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_cos :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dcos() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_tan :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dtan() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_log :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dlog() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_log10 :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dlog10() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_exp :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dexp() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_pow :
    entry_point = __ pc();
    __ mv(x9, ra);
    continuation = x9;
    __ fld(f10, Address(esp, 2 * Interpreter::stackElementSize));
    __ fld(f11, Address(esp));
    __ mv(sp, x19_sender_sp);
    if (StubRoutines::dpow() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_fmaD :
    if (UseFMA) {
      entry_point = __ pc();
      __ fld(f10, Address(esp, 4 * Interpreter::stackElementSize));
      __ fld(f11, Address(esp, 2 * Interpreter::stackElementSize));
      __ fld(f12, Address(esp));
      __ fmadd_d(f10, f10, f11, f12);
      __ mv(sp, x19_sender_sp); // Restore caller's SP
    }
    break;
  case Interpreter::java_lang_math_fmaF :
    if (UseFMA) {
      entry_point = __ pc();
      __ flw(f10, Address(esp, 2 * Interpreter::stackElementSize));
      __ flw(f11, Address(esp, Interpreter::stackElementSize));
      __ flw(f12, Address(esp));
      __ fmadd_s(f10, f10, f11, f12);
      __ mv(sp, x19_sender_sp); // Restore caller's SP
    }
    break;
  default:
    ;
  }
  if (entry_point != nullptr) {
    __ jr(continuation);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  // xmethod: Method*
  // x19_sender_sp: sender SP

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to null
  __ empty_expression_stack();
  __ restore_bcp();    // bcp must be correct for exception handler   (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
                                     xmethod);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ld(t0, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ shadd(t0, t0, fp, t0, LogBytesPerWord);
    // maximal sp for current fp (stack grows negative)
    // check if frame is complete
    __ bge(t0, sp, L);
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters

  // convention: expect aberrant index in register x11
  __ zero_extend(c_rarg2, x11, 32);
  // convention: expect array in register x13
  __ mv(c_rarg1, x13);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop_reg(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
  const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == nullptr, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop_reg(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ la(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(x10, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // kind of lame ExternalAddress can't take null because
    // external_word_Relocation will assert.
    if (message != nullptr) {
      __ la(c_rarg2, Address((address)message));
    } else {
      __ mv(c_rarg2, NULL_WORD);
    }
    __ call_VM(x10,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ j(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // and null it as marker that esp is now tos until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);

  if (state == atos) {
    Register obj = x10;
    Register mdp = x11;
    Register tmp = x12;
    __ ld(mdp, Address(xmethod, Method::method_data_offset()));
    __ profile_return_type(mdp, obj, tmp);
  }

  const Register cache = x11;
  const Register index = x12;

  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
    __ shadd(esp, cache, esp, t0, 3);
  } else {
    // Pop N words from the stack
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));

    __ shadd(esp, cache, esp, t0, 3);
  }

  // Restore machine SP
  __ restore_sp_after_call();

  __ check_and_handle_popframe(xthread);
  __ check_and_handle_earlyret(xthread);

  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step,
                                                               address continuation) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);
  __ get_dispatch();

  __ restore_sp_after_call();  // Restore SP to extended SP

  // Restore expression stack pointer
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // null last_sp until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  // handle exceptions
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  if (type == T_OBJECT) {
    // retrieve result from frame
    __ ld(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // and verify it
    __ verify_oop(x10);
  } else {
    __ cast_primitive_type(type, x10);
  }

  __ ret(); // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
                                                                address runtime_entry) {
  assert_cond(runtime_entry != nullptr);
  address entry = __ pc();
  __ push(state);
  __ push_cont_fastpath(xthread);
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath(xthread);
  __ membar(MacroAssembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various type of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// xmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ ld(x10, Address(xmethod, Method::method_data_offset()));
    __ beqz(x10, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(x10, in_bytes(MethodData::invocation_counter_offset()) +
                                              in_bytes(InvocationCounter::counter_offset()));
    const Address mask(x10, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, t0, t1, false, overflow);
    __ j(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(t1,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  __ get_method_counters(xmethod, t1, done);
  const Address mask(t1, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, increment, mask, t0, x11, false, overflow);
  __ bind(done);
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  __ mv(c_rarg1, zr);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), c_rarg1);
  __ j(do_continue);
}

// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (wasn't
// obvious in generate_method_entry) so the guard should work for them
// too.
//
// Args:
//      x13: number of additional locals this frame needs (what we must check)
//      xmethod: Method*
//
// Kills:
//      x10
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_amd64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  // total overhead size: entry_size + (saved fp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = (int)os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ mv(t0, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ bleu(x13, t0, after_frame_check);

  // compute sp as if this were going to be the last frame on
  // the stack before the red zone

  // locals + overhead, in bytes
  __ mv(x10, overhead_size);
  __ shadd(x10, x13, x10, t0, Interpreter::logStackElementSize); // 2 slots per parameter.

  const Address stack_limit(xthread, JavaThread::stack_overflow_limit_offset());
  __ ld(t0, stack_limit);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack limit is non-zero.
  __ bnez(t0, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add stack limit to locals.
  __ add(x10, x10, t0);

  // Check against the current stack bottom.
  __ bgtu(sp, x10, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller. This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind. The ANDI should be
  // unnecessary because the sender SP in x19 is always aligned, but
  // it doesn't hurt.
  __ andi(sp, x19_sender_sp, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
  __ far_jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      xmethod: Method*
//      xlocals: locals
//
// Kills:
//      x10
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      t0, t1 (temporary regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method doesn't need synchronization", false);
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ lwu(x10, access_flags);
    __ andi(t0, x10, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ld(x10, Address(xlocals, Interpreter::local_offset_in_bytes(0)));
    __ beqz(t0, done);
    __ load_mirror(x10, xmethod, x15, t1);

#ifdef ASSERT
    {
      Label L;
      __ bnez(x10, L);
      __ stop("synchronization object is null");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ check_extended_sp();
  __ add(sp, sp, - entry_size); // add space for a monitor entry
  __ add(esp, esp, - entry_size);
  __ sub(t0, sp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ sub(t0, esp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, monitor_block_top); // set new monitor block top
  // store object
  __ sd(x10, Address(esp, BasicObjectLock::obj_offset()));
  __ mv(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      ra: return address
//      xmethod: Method*
//      xlocals: pointer to locals
//      xcpool: cp cache
//      stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ add(esp, sp, - 14 * wordSize);
    __ mv(xbcp, zr);
    __ add(sp, sp, - 14 * wordSize);
    // add 2 zero-initialized slots for native calls
    __ sd(zr, Address(sp, 13 * wordSize));
    __ sd(zr, Address(sp, 12 * wordSize));
  } else {
    __ add(esp, sp, - 12 * wordSize);
    __ ld(t0, Address(xmethod, Method::const_offset()));      // get ConstMethod
    __ add(xbcp, t0, in_bytes(ConstMethod::codes_offset()));  // get codebase
    __ add(sp, sp, - 12 * wordSize);
  }
  __ sd(xbcp, Address(sp, wordSize));
  __ mv(t0, frame::interpreter_frame_initial_sp_offset);
  __ sd(t0, Address(sp, 0));

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ld(t0, Address(xmethod, Method::method_data_offset()));
    __ beqz(t0, method_data_continue);
    __ la(t0, Address(t0, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
  }

  __ sd(xmethod, Address(sp, 7 * wordSize));
  __ sd(ProfileInterpreter ? t0 : zr, Address(sp, 6 * wordSize));

  __ sd(ra, Address(sp, 11 * wordSize));
  __ sd(fp, Address(sp, 10 * wordSize));
  __ la(fp, Address(sp, 12 * wordSize)); // include ra & fp

  __ ld(xcpool, Address(xmethod, Method::const_offset()));
  __ ld(xcpool, Address(xcpool, ConstMethod::constants_offset()));
  __ ld(xcpool, Address(xcpool, ConstantPool::cache_offset()));
  __ sd(xcpool, Address(sp, 3 * wordSize));
  __ sub(t0, xlocals, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize); // t0 = xlocals - fp();
  // Store relativized xlocals, see frame::interpreter_frame_locals().
  __ sd(t0, Address(sp, 2 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ sd(x19_sender_sp, Address(sp, 9 * wordSize));
  __ sd(zr, Address(sp, 8 * wordSize));

  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(t2, xmethod, x15, t1);
  __ sd(t2, Address(sp, 4 * wordSize));

  if (!native_call) {
    __ ld(t0, Address(xmethod, Method::const_offset()));
    __ lhu(t0, Address(t0, ConstMethod::max_stack_offset()));
    __ add(t0, t0, MAX2(3, Method::extra_stack_entries()));
    __ slli(t0, t0, 3);
    __ sub(t0, sp, t0);
    __ andi(t0, t0, -16);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    // Store extended SP
    __ sd(t1, Address(sp, 5 * wordSize));
    // Move SP out of the way
    __ mv(sp, t0);
  } else {
    // Make sure there is room for the exception oop pushed in case method throws
    // an exception (see TemplateInterpreterGenerator::generate_throw_exception())
    __ sub(t0, sp, 2 * wordSize);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    __ sd(t1, Address(sp, 5 * wordSize));
    __ mv(sp, t0);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // xmethod: Method*
  // x19_sender_sp: senderSP must preserve for slow path, set SP to it on fast path

  // ra is live. It must be saved around calls.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  const Register local_0 = c_rarg0;
  // Check if local 0 isn't null
  // If the receiver is null then it is OK to jump to the slow path.
  __ ld(local_0, Address(esp, 0));
  __ beqz(local_0, slow_path);

  // Load the value of the referent field.
  const Address field_address(local_0, referent_offset);
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->load_at(_masm, IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ t0, /*tmp2*/ t1);

  // areturn
  __ andi(sp, x19_sender_sp, -16); // done with stack
  __ ret();

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  // TODO: Unimplemented generate_CRC32_update_entry
  return nullptr;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32_updateBytes_entry
  return nullptr;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32C_updateBytes_entry
  return nullptr;
}

// Not supported
address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // See more discussion in stackOverflow.hpp.

  const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
  const int page_size = (int)os::vm_page_size();
  const int n_shadow_pages = shadow_zone_size / page_size;

#ifdef ASSERT
  Label L_good_limit;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bnez(t0, L_good_limit);
  __ stop("shadow zone safe limit is not initialized");
  __ bind(L_good_limit);

  Label L_good_watermark;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bnez(t0, L_good_watermark);
  __ stop("shadow zone growth watermark is not initialized");
  __ bind(L_good_watermark);
#endif

  Label L_done;

  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bgtu(sp, t0, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p * page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bleu(sp, t0, L_done);
  __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // x11: Method*
  // x30: sender sp

  address entry_point = __ pc();

  const Address constMethod       (xmethod, Method::const_offset());
  const Address access_flags      (xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x12, ConstMethod::
                                   size_of_parameters_offset());

  // get parameter size (always needed)
  __ ld(x12, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack.

  // xmethod: Method*
  // x12: size of parameters
  // x30: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (xlocals)
  __ shadd(xlocals, x12, esp, xlocals, 3);
  __ addi(xlocals, xlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andi(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute non-native method as native", false);
  __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(xthread,
                                              in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mv(t1, true);
  __ sb(t1, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ sb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    __ lwu(x10, access_flags);
    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
#endif
  }

  // start execution
#ifdef ASSERT
  __ verify_frame_setup();
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = x18;
  const Register result_handler = x19;

  // allocate space for parameters
  __ ld(t, Address(xmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ slli(t, t, Interpreter::logStackElementSize);
  __ sub(x30, esp, t);
  __ andi(sp, x30, -16);
  __ mv(esp, x30);

  // get signature handler
  {
    Label L;
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bnez(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == xlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t0,
         "adjust this code");

  // The generated handlers do not touch xmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ jalr(t);
  __ get_method(xmethod); // slow path can do a GC, reload xmethod


  // result handler is in x10
  // set result handler
  __ mv(result_handler, x10);
  // pass mirror handle if static call
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_STATIC));
    __ beqz(t0, L);
    // get mirror
    __ load_mirror(t, xmethod, x28, t1);
    // copy mirror into activation frame
    __ sd(t, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ addi(c_rarg1, fp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in x28
  {
    Label L;
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ la(t, unsatisfied);
    __ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8

    __ bne(x28, t1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ get_method(xmethod);
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, xthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  Label native_return;
  __ set_last_Java_frame(esp, fp, native_return, x30);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ lwu(t, Address(xthread, JavaThread::thread_state_offset()));
    __ addi(t0, zr, (u1)_thread_in_Java);
    __ beq(t, t0, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
  __ mv(t0, _thread_in_native);
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
  __ sw(t0, Address(t1));

  // Call the native method.
  __ jalr(x28);
  __ bind(native_return);
  __ get_method(xmethod);
  // result potentially in x10 or f10

  // Restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(t0);

  // make room for the pushes we're about to do
  __ sub(t0, esp, 4 * wordSize);
  __ andi(sp, t0, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_native_trans);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ membar(MacroAssembler::AnyAny);
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label L, Continue;

    // We need an acquire here to ensure that any subsequent load of the
    // global SafepointSynchronize::_state flag is ordered after this load
    // of the thread-local polling word. We don't want this poll to
    // return false (i.e. not safepointing) and a later poll of the global
    // SafepointSynchronize::_state spuriously to return true.
    //
    // This is to avoid a race when we're in a native->Java transition
    // racing the code which wakes up from a safepoint.
    __ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */);
    __ lwu(t1, Address(xthread, JavaThread::suspend_flags_offset()));
    __ beqz(t1, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mv(c_rarg0, xthread);
    __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ get_method(xmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_Java);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ld(t, Address(xthread, JavaThread::active_handles_offset()));
  __ sd(zr, Address(t, JNIHandleBlock::top_offset()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop;
    __ la(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ bne(t, result_handler, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
    __ pop(ltos);
    __ resolve_jobject(x10, t, t1);
    __ sd(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lwu(t0, Address(xthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ addi(t1, zr, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
    __ bne(t0, t1, no_reguard);

    __ push_call_clobbered_registers();
    __ mv(c_rarg0, xthread);
    __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ pop_call_clobbered_registers();
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(xmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // xbcp == code_base()
  __ ld(xbcp, Address(xmethod, Method::const_offset()));      // get ConstMethod*
  __ add(xbcp, xbcp, in_bytes(ConstMethod::codes_offset()));  // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the sp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_SYNCHRONIZED));
    __ beqz(t0, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // monitor expect in c_rarg1 for slow unlock path
      __ la(c_rarg1, Address(fp,   // address of first monitor
                             (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                        wordSize - sizeof(BasicObjectLock))));

      __ ld(t, Address(c_rarg1, BasicObjectLock::obj_offset()));
      __ bnez(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  __ pop(ltos);
  __ pop(dtos);

  __ jalr(result_handler);

  // remove activation
  __ ld(esp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mv(sp, esp);

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

  // determine code generation flags
  const bool inc_counter = UseCompiler || CountCompiledCalls;

  // t0: sender sp
  address entry_point = __ pc();

  const Address constMethod(xmethod, Method::const_offset());
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x13,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ld(x13, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // x12: size of parameters

  __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
  __ sub(x13, x13, x12); // x13 = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // compute beginning of parameters (xlocals)
  __ shadd(xlocals, x12, esp, t1, 3);
  __ add(xlocals, xlocals, -wordSize);

  // Make room for additional locals
  __ slli(t1, x13, 3);
  __ sub(t0, esp, t1);

  // Padding between locals and fixed part of activation frame to ensure
  // SP is always 16-byte aligned.
  __ andi(sp, t0, -16);

  // x13 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ blez(x13, exit); // do nothing if x13 <= 0
    __ bind(loop);
    __ sd(zr, Address(t0));
    __ add(t0, t0, wordSize);
    __ add(x13, x13, -1); // until everything initialized
    __ bnez(x13, loop);
    __ bind(exit);
  }

  // And the base dispatch table
  __ get_dispatch();

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute native method as non-native");
  __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(xthread,
                                              in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mv(t1, true);
  __ sb(t1, do_not_unlock_if_synchronized);

  Label no_mdp;
  const Register mdp = x13;
  __ ld(mdp, Address(xmethod, Method::method_data_offset()));
  __ beqz(mdp, no_mdp);
  __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
  __ profile_parameters_type(mdp, x11, x12, x14); // use x11, x12, x14 as tmp registers
  __ bind(no_mdp);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ sb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    __ lwu(x10, access_flags);
    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
#endif
  }

  // start execution
#ifdef ASSERT
  __ verify_frame_setup();
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Method entry for java.lang.Thread.currentThread
address TemplateInterpreterGenerator::generate_currentThread() {
  address entry_point = __ pc();

  __ ld(x10, Address(xthread, JavaThread::vthread_offset()));
  __ resolve_oop_handle(x10, t0, t1);
  __ ret();

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  // x10: exception
  // x13: return address/pc that threw exception
  __ restore_bcp();    // xbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase();  // restore xheapbase as heapbase.
  __ get_dispatch();

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, xthread may be corrupt.
  __ get_method(xmethod);
  // expression stack is undefined here
  // x10: exception
  // xbcp: exception bcp
  __ verify_oop(x10);
  __ mv(c_rarg1, x10);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(x13,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);

  // Restore machine SP
  __ restore_sp_after_call();

  // x10: exception handler entry point
  // x13: preserved exception oop
  // xbcp: bcp for exception handler
  __ push_ptr(x13); // push exception which is now the only value on the stack
  __ jr(x10); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  //       which caused the exception and the expression stack is
  //       empty. Thus, for any VM calls at this point, GC will find a legal
  //       oop map (with empty expression stack).

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ lwu(x13, Address(xthread, JavaThread::popframe_condition_offset()));
  __ ori(x13, x13, JavaThread::popframe_processing_bit);
  __ sw(x13, Address(xthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ld(c_rarg1, Address(fp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), c_rarg1);
    __ bnez(x10, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(x10);
    __ ld(x10, Address(x10, Method::const_offset()));
    __ load_unsigned_short(x10, Address(x10, in_bytes(ConstMethod::
                                                      size_of_parameters_offset())));
    __ slli(x10, x10, Interpreter::logStackElementSize);
    __ restore_locals();
    __ sub(xlocals, xlocals, x10);
    __ add(xlocals, xlocals, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          xthread, x10, xlocals);

    __ remove_activation(vtos,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ mv(t0, JavaThread::popframe_force_deopt_reexecution_bit);
    __ sw(t0, Address(xthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret();

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Restore the last_sp and null it out
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);
  __ get_dispatch();

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ sw(zr, Address(xthread, JavaThread::popframe_condition_offset()));
  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ lbu(t0, Address(xbcp, 0));
    __ mv(t1, Bytecodes::_invokestatic);
    __ bne(t1, t0, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or null.

    __ ld(c_rarg0, Address(xlocals, 0));
    __ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, xmethod, xbcp);

    __ beqz(x10, L_done);

    __ sd(x10, Address(esp, 0));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Restore machine SP
  __ restore_sp_after_call();

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(x10);
  __ sd(x10, Address(xthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ true,
                       /* notify_jvmdi */ false);
  // restore exception
  __ get_vm_result(x10, xthread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // x10: exception
  // ra: return address/pc that threw exception
  // sp: expression stack of caller
  // fp: fp of caller
  // FIXME: There's no point saving ra here because VM calls don't trash it
  __ sub(sp, sp, 2 * wordSize);
  __ sd(x10, Address(sp, 0));        // save exception
  __ sd(ra, Address(sp, wordSize));  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                         SharedRuntime::exception_handler_for_return_address),
                        xthread, ra);
  __ mv(x11, x10);                   // save exception handler
  __ ld(x10, Address(sp, 0));        // restore exception
  __ ld(ra, Address(sp, wordSize));  // restore return address
  __ add(sp, sp, 2 * wordSize);
  // We might be returning to a deopt handler that expects x13 to
  // contain the exception pc
  __ mv(x13, ra);
  // Note that an "issuing PC" is actually the next PC after the call
  __ jr(x11);                        // jump to exception handler of caller
}

//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(t0, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
  __ sd(zr, cond_addr);

  __ remove_activation(state,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ ret();

  return entry;
}
// end of ForceEarlyReturn support

//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t != nullptr && t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ j(L);
  fep = __ pc();  __ push_f();    __ j(L);
  dep = __ pc();  __ push_d();    __ j(L);
  lep = __ pc();  __ push_l();    __ j(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
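  // The typed entry points above push the cached TOS value onto the expression
  // stack (vep has nothing to push), so all entries converge here and the
  // template is generated and dispatched in the vtos state.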
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push_reg(ra);
  __ push(state);
  __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ mv(c_rarg2, x10);  // Pass itos
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
  __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ pop(state);
  __ pop_reg(ra);
  __ ret();  // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ mv(x7, (address) &BytecodeCounter::_counter_value);
  __ atomic_addw(noreg, 1, x7);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
  __ atomic_addw(noreg, 1, x7);
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Calculate new index for counter:
  //   _index = (_index >> log2_number_of_codes) |
  //            (bytecode << log2_number_of_codes);
  Register index_addr = t1;
  Register index = t0;
  __ mv(index_addr, (address) &BytecodePairHistogram::_index);
  __ lw(index, index_addr);
  __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
  __ orrw(index, x7, index);
  __ sw(index, index_addr);
  // Bump bucket contents:
  //   _counters[_index] ++;
  Register counter_addr = t1;
  __ mv(x7, (address) &BytecodePairHistogram::_counters);
  __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
  __ atomic_addw(noreg, 1, counter_addr);
}

void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated");
  __ rt_call(Interpreter::trace_code(t->tos_in()));
  __ reinit_heapbase();
}

void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ push_reg(t0);
  __ mv(t0, (address) &BytecodeCounter::_counter_value);
  __ ld(t0, Address(t0));
  __ mv(t1, StopInterpreterAt);
  __ bne(t0, t1, L);
  __ ebreak();
  __ bind(L);
  __ pop_reg(t0);
}

#endif // !PRODUCT