/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/powerOfTwo.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andi(esp, esp, -16);
  __ mv(c_rarg3, esp);
  // xmethod
  // xlocals
  // c_rarg3: first stack arg - wordSize
  // adjust sp

  __ addi(sp, c_rarg3, -18 * wordSize);
  __ addi(sp, sp, -2 * wordSize);
  __ sd(ra, Address(sp, 0));

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             xmethod, xlocals, c_rarg3);

  // x10: result handler

  // Stack layout:
  // sp: return address           <- sp
  //     1 garbage
  //     8 integer args (if static first is unused)
  //     1 float/double identifiers
  //     8 double args
  //     stack args                <- esp
  //     garbage
  //     expression stack bottom
  //     bcp (null)
  //     ...

  // Restore ra
  __ ld(ra, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize);

  // Do FP first so we can use c_rarg3 as temp
  __ lwu(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = g_FPArgReg[i];
    Label d, done;

    __ test_bit(t0, c_rarg3, i);
    __ bnez(t0, d);
    __ flw(r, Address(sp, (10 + i) * wordSize));
    __ j(done);
    __ bind(d);
    __ fld(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here. It will be loaded with the JNIEnv* later.
  for (int i = 1; i < Argument::n_int_register_parameters_c; i++) {
    const Register rm = g_INTArgReg[i];
    __ ld(rm, Address(sp, i * wordSize));
  }

  __ addi(sp, sp, 18 * wordSize);
  __ ret();

  return entry;
}

// Various method entries
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // xmethod: Method*
  // x19_sender_sp: sender sp
  // esp: args

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.
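  //
  // Common shape of the cases below: load the argument(s) from the
  // expression stack into f10/f11, restore the caller's SP from
  // x19_sender_sp, and either compute the result inline (abs, sqrt, fma)
  // or call a StubRoutines stub (with a SharedRuntime fallback), then
  // return through `continuation`.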

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack:
  //        [ arg ]  <-- esp
  //        [ arg ]
  // retaddr in ra

  address fn = nullptr;
  address entry_point = nullptr;
  Register continuation = ra;
  switch (kind) {
  case Interpreter::java_lang_math_abs:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ fabs_d(f10, f10);
    __ mv(sp, x19_sender_sp); // Restore caller's SP
    break;
  case Interpreter::java_lang_math_sqrt:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ fsqrt_d(f10, f10);
    __ mv(sp, x19_sender_sp);
    break;
  case Interpreter::java_lang_math_sin :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dsin() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_cos :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dcos() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_tan :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dtan() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_log :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dlog() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_log10 :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dlog10() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_exp :
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9;  // The first callee-saved register
    if (StubRoutines::dexp() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_pow :
    entry_point = __ pc();
    __ mv(x9, ra);
    continuation = x9;
    __ fld(f10, Address(esp, 2 * Interpreter::stackElementSize));
    __ fld(f11, Address(esp));
    __ mv(sp, x19_sender_sp);
    if (StubRoutines::dpow() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_fmaD :
    if (UseFMA) {
      entry_point = __ pc();
      __ fld(f10, Address(esp, 4 * Interpreter::stackElementSize));
      __ fld(f11, Address(esp, 2 * Interpreter::stackElementSize));
      __ fld(f12, Address(esp));
      __ fmadd_d(f10, f10, f11, f12);
      __ mv(sp, x19_sender_sp); // Restore caller's SP
    }
    break;
  case Interpreter::java_lang_math_fmaF :
    if (UseFMA) {
      entry_point = __ pc();
      __ flw(f10, Address(esp, 2 * Interpreter::stackElementSize));
      __ flw(f11, Address(esp, Interpreter::stackElementSize));
      __ flw(f12, Address(esp));
      __ fmadd_s(f10, f10, f11, f12);
      __ mv(sp, x19_sender_sp); // Restore caller's SP
    }
    break;
  default:
    ;
  }
  if (entry_point != nullptr) {
    __ jr(continuation);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  // xmethod: Method*
  // x19_sender_sp: sender SP

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to null
  __ empty_expression_stack();
  __ restore_bcp();    // bcp must be correct for exception handler (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
             xmethod);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ld(t0, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ shadd(t0, t0, fp, t0, LogBytesPerWord);
    // maximal sp for current fp (stack grows negative)
    // check if frame is complete
    __ bge(t0, sp, L);
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters

  // convention: expect aberrant index in register x11
  __ zero_extend(c_rarg2, x11, 32);
  // convention: expect array in register x13
  __ mv(c_rarg1, x13);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop_reg(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == nullptr, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop_reg(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ la(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(x10, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // kind of lame ExternalAddress can't take null because
    // external_word_Relocation will assert.
    if (message != nullptr) {
      __ la(c_rarg2, Address((address)message));
    } else {
      __ mv(c_rarg2, NULL_WORD);
    }
    __ call_VM(x10,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ j(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // and null it as marker that esp is now tos until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);

  if (state == atos) {
    Register obj = x10;
    Register mdp = x11;
    Register tmp = x12;
    __ ld(mdp, Address(xmethod, Method::method_data_offset()));
    __ profile_return_type(mdp, obj, tmp);
  }

  const Register cache = x11;
  const Register index = x12;

  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
    __ shadd(esp, cache, esp, t0, 3);
  } else {
    // Pop N words from the stack
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));

    __ shadd(esp, cache, esp, t0, 3);
  }

  // Restore machine SP
  __ restore_sp_after_call();

  __ check_and_handle_popframe(xthread);
  __ check_and_handle_earlyret(xthread);

  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step,
                                                               address continuation) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);
  __ get_dispatch();

  __ restore_sp_after_call();  // Restore SP to extended SP

  // Restore expression stack pointer
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // null last_sp until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  // handle exceptions
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  if (type == T_OBJECT) {
    // retrieve result from frame
    __ ld(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // and verify it
    __ verify_oop(x10);
  } else {
    __ cast_primitive_type(type, x10);
  }

  __ ret();  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
                                                                address runtime_entry) {
  assert_cond(runtime_entry != nullptr);
  address entry = __ pc();
  __ push(state);
  __ push_cont_fastpath(xthread);
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath(xthread);
  __ membar(MacroAssembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// xmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ ld(x10, Address(xmethod, Method::method_data_offset()));
    __ beqz(x10, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(x10, in_bytes(MethodData::invocation_counter_offset()) +
                                              in_bytes(InvocationCounter::counter_offset()));
    const Address mask(x10, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, t0, t1, false, overflow);
    __ j(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(t1,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  __ get_method_counters(xmethod, t1, done);
  const Address mask(t1, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, increment, mask, t0, x11, false, overflow);
  __ bind(done);
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  __ mv(c_rarg1, zr);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), c_rarg1);
  __ j(do_continue);
}

// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this wasn't
// obvious in generate_method_entry), so the guard should work for them
// too.
//
// Args:
//   x13: number of additional locals this frame needs (what we must check)
//   xmethod: Method*
//
// Kills:
//   x10
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_amd64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  // total overhead size: entry_size + (saved fp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = (int)os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ mv(t0, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ bleu(x13, t0, after_frame_check);

  // compute sp as if this were going to be the last frame on
  // the stack before the red zone

  // locals + overhead, in bytes
  __ mv(x10, overhead_size);
  __ shadd(x10, x13, x10, t0, Interpreter::logStackElementSize);  // 2 slots per parameter.

  const Address stack_limit(xthread, JavaThread::stack_overflow_limit_offset());
  __ ld(t0, stack_limit);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack limit is non-zero.
  __ bnez(t0, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add stack limit to locals.
  __ add(x10, x10, t0);

  // Check against the current stack bottom.
  __ bgtu(sp, x10, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller. This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind. The ANDI should be
  // unnecessary because the sender SP in x19 is always aligned, but
  // it doesn't hurt.
  __ andi(sp, x19_sender_sp, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
  __ far_jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//   xmethod: Method*
//   xlocals: locals
//
// Kills:
//   x10
//   c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//   t0, t1 (temporary regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method doesn't need synchronization", false);
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ lwu(x10, access_flags);
    __ andi(t0, x10, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ld(x10, Address(xlocals, Interpreter::local_offset_in_bytes(0)));
    __ beqz(t0, done);
    __ load_mirror(x10, xmethod, x15, t1);

#ifdef ASSERT
    {
      Label L;
      __ bnez(x10, L);
      __ stop("synchronization object is null");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ check_extended_sp();
  __ add(sp, sp, -entry_size);   // add space for a monitor entry
  __ add(esp, esp, -entry_size);
  __ sub(t0, sp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ sub(t0, esp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, monitor_block_top);  // set new monitor block top
  // store object
  __ sd(x10, Address(esp, BasicObjectLock::obj_offset()));
  __ mv(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
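//
// A rough sketch of the frame laid out below (for orientation only; the
// stores in this function and frame_riscv.hpp are authoritative), slots in
// words above the new sp:
//    0: monitor block top / initial esp (relativized)
//    1: bcp (zero for native calls)
//    2: locals (relativized)
//    3: constant pool cache
//    4: mirror
//    5: extended SP (relativized)
//    6: MethodData* or null
//    7: Method*
//    8: last_sp (null)
//    9: sender SP
//   10: saved fp
//   11: return address (ra); fp is set just above this slot
//   (native frames allocate two additional zero-initialized slots)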
//
// Args:
//   ra: return address
//   xmethod: Method*
//   xlocals: pointer to locals
//   xcpool: cp cache
//   stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ add(esp, sp, - 14 * wordSize);
    __ mv(xbcp, zr);
    __ add(sp, sp, - 14 * wordSize);
    // add 2 zero-initialized slots for native calls
    __ sd(zr, Address(sp, 13 * wordSize));
    __ sd(zr, Address(sp, 12 * wordSize));
  } else {
    __ add(esp, sp, - 12 * wordSize);
    __ ld(t0, Address(xmethod, Method::const_offset()));      // get ConstMethod
    __ add(xbcp, t0, in_bytes(ConstMethod::codes_offset()));  // get codebase
    __ add(sp, sp, - 12 * wordSize);
  }
  __ sd(xbcp, Address(sp, wordSize));
  __ mv(t0, frame::interpreter_frame_initial_sp_offset);
  __ sd(t0, Address(sp, 0));

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ld(t0, Address(xmethod, Method::method_data_offset()));
    __ beqz(t0, method_data_continue);
    __ la(t0, Address(t0, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
  }

  __ sd(xmethod, Address(sp, 7 * wordSize));
  __ sd(ProfileInterpreter ? t0 : zr, Address(sp, 6 * wordSize));

  __ sd(ra, Address(sp, 11 * wordSize));
  __ sd(fp, Address(sp, 10 * wordSize));
  __ la(fp, Address(sp, 12 * wordSize)); // include ra & fp

  __ ld(xcpool, Address(xmethod, Method::const_offset()));
  __ ld(xcpool, Address(xcpool, ConstMethod::constants_offset()));
  __ ld(xcpool, Address(xcpool, ConstantPool::cache_offset()));
  __ sd(xcpool, Address(sp, 3 * wordSize));
  __ sub(t0, xlocals, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);  // t0 = xlocals - fp();
  // Store relativized xlocals, see frame::interpreter_frame_locals().
  __ sd(t0, Address(sp, 2 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ sd(x19_sender_sp, Address(sp, 9 * wordSize));
  __ sd(zr, Address(sp, 8 * wordSize));

  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(t2, xmethod, x15, t1);
  __ sd(t2, Address(sp, 4 * wordSize));

  if (!native_call) {
    __ ld(t0, Address(xmethod, Method::const_offset()));
    __ lhu(t0, Address(t0, ConstMethod::max_stack_offset()));
    __ add(t0, t0, MAX2(3, Method::extra_stack_entries()));
    __ slli(t0, t0, 3);
    __ sub(t0, sp, t0);
    __ andi(t0, t0, -16);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    // Store extended SP
    __ sd(t1, Address(sp, 5 * wordSize));
    // Move SP out of the way
    __ mv(sp, t0);
  } else {
    // Make sure there is room for the exception oop pushed in case method throws
    // an exception (see TemplateInterpreterGenerator::generate_throw_exception())
    __ sub(t0, sp, 2 * wordSize);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    __ sd(t1, Address(sp, 5 * wordSize));
    __ mv(sp, t0);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // xmethod: Method*
  // x19_sender_sp: senderSP must preserve for slow path, set SP to it on fast path
  //
  // ra is live. It must be saved around calls.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  const Register local_0 = c_rarg0;
  // Check if local 0 isn't null
  // If the receiver is null then it is OK to jump to the slow path.
  __ ld(local_0, Address(esp, 0));
  __ beqz(local_0, slow_path);

  // Load the value of the referent field.
  const Address field_address(local_0, referent_offset);
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->load_at(_masm, IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ t0, /*tmp2*/ t1);

  // areturn
  __ andi(sp, x19_sender_sp, -16);  // done with stack
  __ ret();

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  // TODO: Unimplemented generate_CRC32_update_entry
  return nullptr;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32_updateBytes_entry
  return nullptr;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32C_updateBytes_entry
  return nullptr;
}

// Not supported
address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // See more discussion in stackOverflow.hpp.
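  // For illustration only, assuming 4 KiB pages and a 20-page shadow zone
  // (both values are platform- and flag-dependent): n_shadow_pages is then
  // 20 and the banging loop below touches one word in each of those pages
  // below sp.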

  const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
  const int page_size = (int)os::vm_page_size();
  const int n_shadow_pages = shadow_zone_size / page_size;

#ifdef ASSERT
  Label L_good_limit;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bnez(t0, L_good_limit);
  __ stop("shadow zone safe limit is not initialized");
  __ bind(L_good_limit);

  Label L_good_watermark;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bnez(t0, L_good_watermark);
  __ stop("shadow zone growth watermark is not initialized");
  __ bind(L_good_watermark);
#endif

  Label L_done;

  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bgtu(sp, t0, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p * page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bleu(sp, t0, L_done);
  __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // x11: Method*
  // x30: sender sp

  address entry_point = __ pc();

  const Address constMethod       (xmethod, Method::const_offset());
  const Address access_flags      (xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x12, ConstMethod::
                                        size_of_parameters_offset());

  // get parameter size (always needed)
  __ ld(x12, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack.

  // xmethod: Method*
  // x12: size of parameters
  // x30: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (xlocals)
  __ shadd(xlocals, x12, esp, xlocals, 3);
  __ addi(xlocals, xlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andi(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute non-native method as native", false);
  __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(xthread,
                                              in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mv(t1, true);
  __ sb(t1, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ sb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    __ lwu(x10, access_flags);
    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
#endif
  }

  // start execution
#ifdef ASSERT
  __ verify_frame_setup();
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = x18;
  const Register result_handler = x19;

  // allocate space for parameters
  __ ld(t, Address(xmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ slli(t, t, Interpreter::logStackElementSize);
  __ sub(x30, esp, t);
  __ andi(sp, x30, -16);
  __ mv(esp, x30);

  // get signature handler
  {
    Label L;
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bnez(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == xlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t0,
         "adjust this code");

  // The generated handlers do not touch xmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
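  // The handler invoked below copies the Java arguments from xlocals into
  // the native ABI locations (argument registers and outgoing stack slots),
  // per the SignatureHandlerGenerator conventions asserted above.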
  __ jalr(t);
  __ get_method(xmethod);  // slow path can do a GC, reload xmethod

  // result handler is in x10
  // set result handler
  __ mv(result_handler, x10);
  // pass mirror handle if static call
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_STATIC));
    __ beqz(t0, L);
    // get mirror
    __ load_mirror(t, xmethod, x28, t1);
    // copy mirror into activation frame
    __ sd(t, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ addi(c_rarg1, fp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in x28
  {
    Label L;
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ la(t, unsatisfied);
    __ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8

    __ bne(x28, t1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ get_method(xmethod);
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, xthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  Label native_return;
  __ set_last_Java_frame(esp, fp, native_return, x30);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ lwu(t, Address(xthread, JavaThread::thread_state_offset()));
    __ addi(t0, zr, (u1)_thread_in_Java);
    __ beq(t, t0, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
  __ mv(t0, _thread_in_native);
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
  __ sw(t0, Address(t1));

  // Call the native method.
  __ jalr(x28);
  __ bind(native_return);
  __ get_method(xmethod);
  // result potentially in x10 or f10

  // Restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(t0);

  // make room for the pushes we're about to do
  __ sub(t0, esp, 4 * wordSize);
  __ andi(sp, t0, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
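  // push(dtos) spills the potential floating-point result (f10) and
  // push(ltos) the potential integral/oop result (x10), in that fixed order.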
  __ push(dtos);
  __ push(ltos);

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_native_trans);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ membar(MacroAssembler::AnyAny);
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label L, Continue;

    // We need an acquire here to ensure that any subsequent load of the
    // global SafepointSynchronize::_state flag is ordered after this load
    // of the thread-local polling word. We don't want this poll to
    // return false (i.e. not safepointing) and a later poll of the global
    // SafepointSynchronize::_state spuriously to return true.
    //
    // This is to avoid a race when we're in a native->Java transition
    // racing the code which wakes up from a safepoint.
    __ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */);
    __ lwu(t1, Address(xthread, JavaThread::suspend_flags_offset()));
    __ beqz(t1, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mv(c_rarg0, xthread);
    __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ get_method(xmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_Java);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ld(t, Address(xthread, JavaThread::active_handles_offset()));
  __ sd(zr, Address(t, JNIHandleBlock::top_offset()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop;
    __ la(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ bne(t, result_handler, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
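    // The raw jobject (or null) was saved above with push(ltos); reload it
    // into x10 before resolving the handle.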
    __ pop(ltos);
    __ resolve_jobject(x10, t, t1);
    __ sd(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lwu(t0, Address(xthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ addi(t1, zr, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
    __ bne(t0, t1, no_reguard);

    __ push_call_clobbered_registers();
    __ mv(c_rarg0, xthread);
    __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ pop_call_clobbered_registers();
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(xmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // xbcp == code_base()
  __ ld(xbcp, Address(xmethod, Method::const_offset()));     // get ConstMethod*
  __ add(xbcp, xbcp, in_bytes(ConstMethod::codes_offset())); // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the sp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_SYNCHRONIZED));
    __ beqz(t0, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // monitor expected in c_rarg1 for slow unlock path
      __ la(c_rarg1, Address(fp,   // address of first monitor
                             (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                        wordSize - sizeof(BasicObjectLock))));

      __ ld(t, Address(c_rarg1, BasicObjectLock::obj_offset()));
      __ bnez(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  __ pop(ltos);
  __ pop(dtos);

  __ jalr(result_handler);

  // remove activation
  __ ld(esp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mv(sp, esp);

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

  // determine code generation flags
  const bool inc_counter = UseCompiler || CountCompiledCalls;

  // t0: sender sp
  address entry_point = __ pc();

  const Address constMethod(xmethod, Method::const_offset());
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x13,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ld(x13, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // x12: size of parameters

  __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
  __ sub(x13, x13, x12);                       // x13 = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // compute beginning of parameters (xlocals)
  __ shadd(xlocals, x12, esp, t1, 3);
  __ add(xlocals, xlocals, -wordSize);

  // Make room for additional locals
  __ slli(t1, x13, 3);
  __ sub(t0, esp, t1);

  // Padding between locals and fixed part of activation frame to ensure
  // SP is always 16-byte aligned.
  __ andi(sp, t0, -16);

  // x13 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ blez(x13, exit); // do nothing if x13 <= 0
    __ bind(loop);
    __ sd(zr, Address(t0));
    __ add(t0, t0, wordSize);
    __ add(x13, x13, -1); // until everything initialized
    __ bnez(x13, loop);
    __ bind(exit);
  }

  // And the base dispatch table
  __ get_dispatch();

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute native method as non-native");
  __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(xthread,
                                              in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mv(t1, true);
  __ sb(t1, do_not_unlock_if_synchronized);

  Label no_mdp;
  const Register mdp = x13;
  __ ld(mdp, Address(xmethod, Method::method_data_offset()));
  __ beqz(mdp, no_mdp);
  __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
  __ profile_parameters_type(mdp, x11, x12, x14); // use x11, x12, x14 as tmp registers
  __ bind(no_mdp);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ sb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    __ lwu(x10, access_flags);
    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
#endif
  }

  // start execution
#ifdef ASSERT
  __ verify_frame_setup();
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Method entry for java.lang.Thread.currentThread
address TemplateInterpreterGenerator::generate_currentThread() {
  address entry_point = __ pc();

  __ ld(x10, Address(xthread, JavaThread::vthread_offset()));
  __ resolve_oop_handle(x10, t0, t1);
  __ ret();

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  // x10: exception
  // x13: return address/pc that threw exception
  __ restore_bcp();    // xbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase();  // restore xheapbase as heapbase.
  __ get_dispatch();

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, xmethod may be corrupt.
  __ get_method(xmethod);
  // expression stack is undefined here
  // x10: exception
  // xbcp: exception bcp
  __ verify_oop(x10);
  __ mv(c_rarg1, x10);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(x13,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);

  // Restore machine SP
  __ restore_sp_after_call();

  // x10: exception handler entry point
  // x13: preserved exception oop
  // xbcp: bcp for exception handler
  __ push_ptr(x13); // push exception which is now the only value on the stack
  __ jr(x10); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ lwu(x13, Address(xthread, JavaThread::popframe_condition_offset()));
  __ ori(x13, x13, JavaThread::popframe_processing_bit);
  __ sw(x13, Address(xthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ld(c_rarg1, Address(fp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), c_rarg1);
    __ bnez(x10, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(x10);
    __ ld(x10, Address(x10, Method::const_offset()));
    __ load_unsigned_short(x10, Address(x10, in_bytes(ConstMethod::
                                                      size_of_parameters_offset())));
    __ slli(x10, x10, Interpreter::logStackElementSize);
    __ restore_locals();
    __ sub(xlocals, xlocals, x10);
    __ add(xlocals, xlocals, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          xthread, x10, xlocals);

    __ remove_activation(vtos,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ mv(t0, JavaThread::popframe_force_deopt_reexecution_bit);
    __ sw(t0, Address(xthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret();

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Restore the last_sp and null it out
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);
  __ get_dispatch();

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ sw(zr, Address(xthread, JavaThread::popframe_condition_offset()));
  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ lbu(t0, Address(xbcp, 0));
    __ mv(t1, Bytecodes::_invokestatic);
    __ bne(t1, t0, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or null.

    __ ld(c_rarg0, Address(xlocals, 0));
    __ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, xmethod, xbcp);

    __ beqz(x10, L_done);

    __ sd(x10, Address(esp, 0));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Restore machine SP
  __ restore_sp_after_call();

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(x10);
  __ sd(x10, Address(xthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, false, true, false);
  // restore exception
  __ get_vm_result(x10, xthread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // x10: exception
  // ra: return address/pc that threw exception
  // sp: expression stack of caller
  // fp: fp of caller
  // FIXME: There's no point saving ra here because VM calls don't trash it
  __ sub(sp, sp, 2 * wordSize);
  __ sd(x10, Address(sp, 0));        // save exception
  __ sd(ra, Address(sp, wordSize));  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                         SharedRuntime::exception_handler_for_return_address),
                        xthread, ra);
  __ mv(x11, x10);                   // save exception handler
  __ ld(x10, Address(sp, 0));        // restore exception
  __ ld(ra, Address(sp, wordSize));  // restore return address
  __ add(sp, sp, 2 * wordSize);
  // We might be returning to a deopt handler that expects x13 to
  // contain the exception pc
  __ mv(x13, ra);
  // Note that an "issuing PC" is actually the next PC after the call
  __ jr(x11);                        // jump to exception
                                     // handler of caller
}

//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(t0, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
  __ sd(zr, cond_addr);

  __ remove_activation(state,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ ret();

  return entry;
}
// end of ForceEarlyReturn support

//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t != nullptr && t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  // atos entry point
  __ push_ptr();
  __ j(L);
  fep = __ pc();  // ftos entry point
  __ push_f();
  __ j(L);
  dep = __ pc();  // dtos entry point
  __ push_d();
  __ j(L);
  lep = __ pc();  // ltos entry point
  __ push_l();
  __ j(L);
  bep = cep = sep = iep = __ pc();  // [bcsi]tos entry point
  __ push_i();
  vep = __ pc();  // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

void TemplateInterpreterGenerator::count_bytecode() {
  __ mv(x7, (address) &BytecodeCounter::_counter_value);
  __ atomic_addw(noreg, 1, x7);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
  __ atomic_addw(noreg, 1, x7);
}

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push_reg(ra);
  __ push(state);
  __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ mv(c_rarg2, x10);  // Pass itos
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
  __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ pop(state);
  __ pop_reg(ra);
  __ ret();  // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Calculate new index for counter:
  //   _index = (_index >> log2_number_of_codes) |
  //            (bytecode << log2_number_of_codes);
  // For example, with log2_number_of_codes == 8 the previous bytecode ends up
  // in the low byte and the current bytecode in the high byte of the index.
  Register index_addr = t1;
  Register index = t0;
  __ mv(index_addr, (address) &BytecodePairHistogram::_index);
  __ lw(index, index_addr);
  __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
  __ orrw(index, x7, index);
  __ sw(index, index_addr);
  // Bump bucket contents:
  //   _counters[_index] ++;
  Register counter_addr = t1;
  __ mv(x7, (address) &BytecodePairHistogram::_counters);
  __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
  __ atomic_addw(noreg, 1, counter_addr);
}

void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated");
  __ rt_call(Interpreter::trace_code(t->tos_in()));
  __ reinit_heapbase();
}

void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ push_reg(t0);
  __ mv(t0, (address) &BytecodeCounter::_counter_value);
  __ ld(t0, Address(t0));
  __ mv(t1, StopInterpreterAt);
  __ bne(t0, t1, L);
  __ ebreak();
  __ bind(L);
  __ pop_reg(t0);
}

#endif // !PRODUCT