/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/powerOfTwo.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;

#define __ _masm->

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andi(esp, esp, -16);
  __ mv(c_rarg3, esp);
  // xmethod
  // xlocals
  // c_rarg3: first stack arg - wordSize
  // adjust sp

  __ addi(sp, c_rarg3, -18 * wordSize);
  __ addi(sp, sp, -2 * wordSize);
  __ sd(ra, Address(sp, 0));

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             xmethod, xlocals, c_rarg3);

  // x10: result handler

  // Stack layout:
  //   sp: return address           <- sp
  //       1 garbage
  //       8 integer args (if static first is unused)
  //       1 float/double identifiers
  //       8 double args
  //         stack args             <- esp
  //         garbage
  //         expression stack bottom
  //         bcp (null)
  //         ...

  // Restore ra
  __ ld(ra, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize);

  // Do FP first so we can use c_rarg3 as temp
  __ lwu(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = g_FPArgReg[i];
    Label d, done;

    __ test_bit(t0, c_rarg3, i);
    __ bnez(t0, d);
    __ flw(r, Address(sp, (10 + i) * wordSize));
    __ j(done);
    __ bind(d);
    __ fld(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler, so we don't touch it
  // here. It will be loaded with the JNIEnv* later.
  for (int i = 1; i < Argument::n_int_register_parameters_c; i++) {
    const Register rm = g_INTArgReg[i];
    __ ld(rm, Address(sp, i * wordSize));
  }

  __ addi(sp, sp, 18 * wordSize);
  __ ret();

  return entry;
}

// Various method entries
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // xmethod: Method*
  // x19_sender_sp: sender sp
  // esp: args

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable,
  // we'd have to worry about how to safepoint so that this code is used.
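  // Put differently: these entries are reached only through the
  // interpreter's method-entry dispatch for the method kinds recognized
  // below; compiled code expands its own intrinsic versions inline and
  // never jumps here, which is why no safepoint poll is needed on this
  // path.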

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack:
  //   [ arg ] <-- esp
  //   [ arg ]
  // retaddr in ra

  address fn = nullptr;
  address entry_point = nullptr;
  Register continuation = ra;
  switch (kind) {
  case Interpreter::java_lang_math_abs:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ fabs_d(f10, f10);
    __ mv(sp, x19_sender_sp); // Restore caller's SP
    break;
  case Interpreter::java_lang_math_sqrt:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ fsqrt_d(f10, f10);
    __ mv(sp, x19_sender_sp);
    break;
  case Interpreter::java_lang_math_sin:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9; // The first callee-saved register
    if (StubRoutines::dsin() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_cos:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9; // The first callee-saved register
    if (StubRoutines::dcos() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_tan:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9; // The first callee-saved register
    if (StubRoutines::dtan() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_log:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9; // The first callee-saved register
    if (StubRoutines::dlog() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_log10:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9; // The first callee-saved register
    if (StubRoutines::dlog10() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_exp:
    entry_point = __ pc();
    __ fld(f10, Address(esp));
    __ mv(sp, x19_sender_sp);
    __ mv(x9, ra);
    continuation = x9; // The first callee-saved register
    if (StubRoutines::dexp() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_pow:
    entry_point = __ pc();
    __ mv(x9, ra);
    continuation = x9;
    __ fld(f10, Address(esp, 2 * Interpreter::stackElementSize));
    __ fld(f11, Address(esp));
    __ mv(sp, x19_sender_sp);
    if (StubRoutines::dpow() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
    }
    __ call(fn);
    break;
  case Interpreter::java_lang_math_fmaD:
    if (UseFMA) {
      entry_point = __ pc();
      __ fld(f10, Address(esp, 4 * Interpreter::stackElementSize));
      __ fld(f11, Address(esp, 2 * Interpreter::stackElementSize));
      __ fld(f12, Address(esp));
      __ fmadd_d(f10, f10, f11, f12);
      __ mv(sp, x19_sender_sp); // Restore caller's SP
    }
    break;
  case Interpreter::java_lang_math_fmaF:
    if (UseFMA) {
      entry_point = __ pc();
      __ flw(f10, Address(esp, 2 * Interpreter::stackElementSize));
      __ flw(f11, Address(esp, Interpreter::stackElementSize));
      __ flw(f12, Address(esp));
      __ fmadd_s(f10, f10, f11, f12);
      __ mv(sp, x19_sender_sp); // Restore caller's SP
    }
    break;
  default:
    ;
  }
  if (entry_point != nullptr) {
    __ jr(continuation);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  // xmethod: Method*
  // x19_sender_sp: sender SP

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to null
  __ empty_expression_stack();
  __ restore_bcp();    // bcp must be correct for exception handler (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
             xmethod);
  // the call_VM checks for exception, so we should never return here.
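  // (should_not_reach_here() below plants a stop/trap as a safety net: if
  //  the exception path above ever fell through, execution would fail fast
  //  instead of running off into arbitrary code.)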
  __ should_not_reach_here();

  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ld(t0, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ shadd(t0, t0, fp, t0, LogBytesPerWord);
    // maximal sp for current fp (stack grows negative)
    // check if frame is complete
    __ bge(t0, sp, L);
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters

  // convention: expect aberrant index in register x11
  __ zero_extend(c_rarg2, x11, 32);
  // convention: expect array in register x13
  __ mv(c_rarg1, x13);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop_reg(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == nullptr, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop_reg(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ la(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(x10, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // Kind of lame: ExternalAddress can't take null, because
    // external_word_Relocation will assert.
    if (message != nullptr) {
      __ la(c_rarg2, Address((address)message));
    } else {
      __ mv(c_rarg2, NULL_WORD);
    }
    __ call_VM(x10,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ j(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // and null it as marker that esp is now tos until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);

  if (state == atos) {
    Register obj = x10;
    Register mdp = x11;
    Register tmp = x12;
    __ ld(mdp, Address(xmethod, Method::method_data_offset()));
    __ profile_return_type(mdp, obj, tmp);
  }

  const Register cache = x11;
  const Register index = x12;

  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
    __ shadd(esp, cache, esp, t0, 3);
  } else {
    // Pop N words from the stack
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));

    __ shadd(esp, cache, esp, t0, 3);
  }

  // Restore machine SP
  __ restore_sp_after_call();

  __ check_and_handle_popframe(xthread);
  __ check_and_handle_earlyret(xthread);

  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step,
                                                               address continuation) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);
  __ get_dispatch();

  __ restore_sp_after_call(); // Restore SP to extended SP

  // Restore expression stack pointer
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // null last_sp until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  // handle exceptions
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  if (type == T_OBJECT) {
    // retrieve result from frame
    __ ld(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // and verify it
    __ verify_oop(x10);
  } else {
    __ cast_primitive_type(type, x10);
  }

  __ ret(); // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
                                                                address runtime_entry) {
  assert_cond(runtime_entry != nullptr);
  address entry = __ pc();
  __ push(state);
  __ push_cont_fastpath(xthread);
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath(xthread);
  __ membar(MacroAssembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

address TemplateInterpreterGenerator::generate_cont_preempt_rerun_interpreter_adapter() {
  return nullptr;
}

// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// xmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO
  // depending on whether we're profiling or not.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ ld(x10, Address(xmethod, Method::method_data_offset()));
    __ beqz(x10, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(x10, in_bytes(MethodData::invocation_counter_offset()) +
                                              in_bytes(InvocationCounter::counter_offset()));
    const Address mask(x10, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, t0, t1, false, overflow);
    __ j(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(t1,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  __ get_method_counters(xmethod, t1, done);
  const Address mask(t1, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, increment, mask, t0, x11, false, overflow);
  __ bind(done);
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  __ mv(c_rarg1, zr);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), c_rarg1);
  __ j(do_continue);
}

// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was not
//       obvious in generate_method_entry), the guard should work for them
//       too.
//
// Args:
//   x13: number of additional locals this frame needs (what we must check)
//   xmethod: Method*
//
// Kills:
//   x10
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_riscv.hpp
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  // total overhead size: entry_size + (saved fp through expr stack
  // bottom).
  // Be sure to change this if you add/subtract anything
  // to/from the overhead area.
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = (int)os::vm_page_size();

  Label after_frame_check;

  // See if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ mv(t0, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ bleu(x13, t0, after_frame_check);

  // compute sp as if this were going to be the last frame on
  // the stack before the red zone

  // locals + overhead, in bytes
  __ mv(x10, overhead_size);
  __ shadd(x10, x13, x10, t0, Interpreter::logStackElementSize); // 2 slots per parameter.

  const Address stack_limit(xthread, JavaThread::stack_overflow_limit_offset());
  __ ld(t0, stack_limit);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack limit is non-zero.
  __ bnez(t0, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add stack limit to locals.
  __ add(x10, x10, t0);

  // Check against the current stack bottom.
  __ bgtu(sp, x10, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller. This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind. The ANDI should be
  // unnecessary because the sender SP in x19 is always aligned, but
  // it doesn't hurt.
  __ andi(sp, x19_sender_sp, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
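  // (far_jump rather than a plain jal: the stub below may lie outside the
  //  +-1 MiB reach of a direct RISC-V jump, and RuntimeAddress keeps the
  //  target relocatable.)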
  assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//   xmethod: Method*
//   xlocals: locals
//
// Kills:
//   x10
//   c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//   t0, t1 (temporary regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method doesn't need synchronization", false);
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ lwu(x10, access_flags);
    __ andi(t0, x10, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ld(x10, Address(xlocals, Interpreter::local_offset_in_bytes(0)));
    __ beqz(t0, done);
    __ load_mirror(x10, xmethod, x15, t1);

#ifdef ASSERT
    {
      Label L;
      __ bnez(x10, L);
      __ stop("synchronization object is null");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ check_extended_sp();
  __ add(sp, sp, -entry_size); // add space for a monitor entry
  __ add(esp, esp, -entry_size);
  __ sub(t0, sp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ sub(t0, esp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, monitor_block_top); // set new monitor block top
  // store object
  __ sd(x10, Address(esp, BasicObjectLock::obj_offset()));
  __ mv(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. The setup is identical for
// interpreted methods and for native methods, hence the shared code.
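//
// A sketch of the fixed frame as laid out below (word offsets from the
// post-decrement sp; inferred from the sd() calls in this function):
//   sp + 11: ra                         sp + 10: caller's fp
//   sp +  9: sender sp                  sp +  8: last_sp (null)
//   sp +  7: Method*                    sp +  6: mdx or null
//   sp +  5: extended sp (relativized)  sp +  4: mirror
//   sp +  3: cp cache                   sp +  2: locals (relativized)
//   sp +  1: bcp                        sp +  0: initial sp offset
// Native frames decrement sp by two extra words and zero-initialize
// sp + 13 and sp + 12; the fp register itself ends up pointing at sp + 12.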
//
// Args:
//   ra: return address
//   xmethod: Method*
//   xlocals: pointer to locals
//   xcpool: cp cache
//   stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ add(esp, sp, -14 * wordSize);
    __ mv(xbcp, zr);
    __ add(sp, sp, -14 * wordSize);
    // add 2 zero-initialized slots for native calls
    __ sd(zr, Address(sp, 13 * wordSize));
    __ sd(zr, Address(sp, 12 * wordSize));
  } else {
    __ add(esp, sp, -12 * wordSize);
    __ ld(t0, Address(xmethod, Method::const_offset()));     // get ConstMethod
    __ add(xbcp, t0, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ add(sp, sp, -12 * wordSize);
  }
  __ sd(xbcp, Address(sp, wordSize));
  __ mv(t0, frame::interpreter_frame_initial_sp_offset);
  __ sd(t0, Address(sp, 0));

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ld(t0, Address(xmethod, Method::method_data_offset()));
    __ beqz(t0, method_data_continue);
    __ la(t0, Address(t0, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
  }

  __ sd(xmethod, Address(sp, 7 * wordSize));
  __ sd(ProfileInterpreter ? t0 : zr, Address(sp, 6 * wordSize));

  __ sd(ra, Address(sp, 11 * wordSize));
  __ sd(fp, Address(sp, 10 * wordSize));
  __ la(fp, Address(sp, 12 * wordSize)); // include ra & fp

  __ ld(xcpool, Address(xmethod, Method::const_offset()));
  __ ld(xcpool, Address(xcpool, ConstMethod::constants_offset()));
  __ ld(xcpool, Address(xcpool, ConstantPool::cache_offset()));
  __ sd(xcpool, Address(sp, 3 * wordSize));
  __ sub(t0, xlocals, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize); // t0 = (xlocals - fp) in words
  // Store relativized xlocals, see frame::interpreter_frame_locals().
  __ sd(t0, Address(sp, 2 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ sd(x19_sender_sp, Address(sp, 9 * wordSize));
  __ sd(zr, Address(sp, 8 * wordSize));

  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(t2, xmethod, x15, t1);
  __ sd(t2, Address(sp, 4 * wordSize));

  if (!native_call) {
    __ ld(t0, Address(xmethod, Method::const_offset()));
    __ lhu(t0, Address(t0, ConstMethod::max_stack_offset()));
    __ add(t0, t0, MAX2(3, Method::extra_stack_entries()));
    __ slli(t0, t0, 3);
    __ sub(t0, sp, t0);
    __ andi(t0, t0, -16);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    // Store extended SP
    __ sd(t1, Address(sp, 5 * wordSize));
    // Move SP out of the way
    __ mv(sp, t0);
  } else {
    // Make sure there is room for the exception oop pushed in case method throws
    // an exception (see TemplateInterpreterGenerator::generate_throw_exception())
    __ sub(t0, sp, 2 * wordSize);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    __ sd(t1, Address(sp, 5 * wordSize));
    __ mv(sp, t0);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // xmethod: Method*
  // x19_sender_sp: sender SP (must be preserved for the slow path; the
  //                fast path sets SP to it before returning)

  // ra is live. It must be saved around calls.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  const Register local_0 = c_rarg0;
  // Check if local 0 is non-null.
  // If the receiver is null then it is OK to jump to the slow path.
  __ ld(local_0, Address(esp, 0));
  __ beqz(local_0, slow_path);

  // Load the value of the referent field.
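  // (IN_HEAP | ON_WEAK_OOP_REF below tells the BarrierSetAssembler to emit
  //  whatever the active collector requires for reading a weak referent --
  //  under G1, the SATB pre-barrier described above -- along with the
  //  plain load.)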
  const Address field_address(local_0, referent_offset);
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->load_at(_masm, IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ t0, /*tmp2*/ t1);

  // areturn
  __ andi(sp, x19_sender_sp, -16); // done with stack
  __ ret();

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  // TODO: Unimplemented generate_CRC32_update_entry
  return nullptr;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32_updateBytes_entry
  return nullptr;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native.
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32C_updateBytes_entry
  return nullptr;
}

// Not supported
address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // See more discussion in stackOverflow.hpp.
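  // The plan: if sp has dropped below the shadow zone growth watermark,
  // touch (bang) every shadow zone page one page at a time, so that any
  // overflow is taken here, where it can be handled, rather than at some
  // arbitrary point deeper in native or runtime code; afterwards the
  // watermark is advanced so the same pages are not banged again on the
  // next call.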

  const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
  const int page_size = (int)os::vm_page_size();
  const int n_shadow_pages = shadow_zone_size / page_size;

#ifdef ASSERT
  Label L_good_limit;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bnez(t0, L_good_limit);
  __ stop("shadow zone safe limit is not initialized");
  __ bind(L_good_limit);

  Label L_good_watermark;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bnez(t0, L_good_watermark);
  __ stop("shadow zone growth watermark is not initialized");
  __ bind(L_good_watermark);
#endif

  Label L_done;

  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bgtu(sp, t0, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p * page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bleu(sp, t0, L_done);
  __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // x11: Method*
  // x30: sender sp

  address entry_point = __ pc();

  const Address constMethod       (xmethod, Method::const_offset());
  const Address access_flags      (xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x12, ConstMethod::
                                        size_of_parameters_offset());

  // get parameter size (always needed)
  __ ld(x12, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack.

  // xmethod: Method*
  // x12: size of parameters
  // x30: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (xlocals)
  __ shadd(xlocals, x12, esp, xlocals, 3);
  __ addi(xlocals, xlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andi(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute non-native method as native", false);
  __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which has not been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(xthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mv(t1, true);
  __ sb(t1, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ sb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    __ lwu(x10, access_flags);
    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
#endif
  }

  // start execution
#ifdef ASSERT
  __ verify_frame_setup();
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t              = x18;
  const Register result_handler = x19;

  // allocate space for parameters
  __ ld(t, Address(xmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ slli(t, t, Interpreter::logStackElementSize);
  __ sub(x30, esp, t);
  __ andi(sp, x30, -16);
  __ mv(esp, x30);

  // get signature handler
  {
    Label L;
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bnez(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == xlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t0,
         "adjust this code");

  // The generated handlers do not touch xmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ jalr(t);
  __ get_method(xmethod); // slow path can do a GC, reload xmethod

  // result handler is in x10
  // set result handler
  __ mv(result_handler, x10);
  // pass mirror handle if static call
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_STATIC));
    __ beqz(t0, L);
    // get mirror
    __ load_mirror(t, xmethod, x28, t1);
    // copy mirror into activation frame
    __ sd(t, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ addi(c_rarg1, fp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in x28
  {
    Label L;
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ mv(t, unsatisfied);
    __ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8

    __ bne(x28, t1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ get_method(xmethod);
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, xthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  Label native_return;
  __ set_last_Java_frame(esp, fp, native_return, x30);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ lwu(t, Address(xthread, JavaThread::thread_state_offset()));
    __ addi(t0, zr, (u1)_thread_in_Java);
    __ beq(t, t0, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
  __ mv(t0, _thread_in_native);
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
  __ sw(t0, Address(t1));

  // Call the native method.
  __ jalr(x28);
  __ bind(native_return);
  __ get_method(xmethod);
  // result potentially in x10 or f10

  // Restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(t0);

  // make room for the pushes we're about to do
  __ sub(t0, esp, 4 * wordSize);
  __ andi(sp, t0, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
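  // (The result is preserved in both forms: f10 via the dtos push and x10
  //  via the ltos push; which one is meaningful is decided later by the
  //  result handler selected for this method's return type.)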
  __ push(dtos);
  __ push(ltos);

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_native_trans);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ membar(MacroAssembler::AnyAny);
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label L, Continue;

    // We need an acquire here to ensure that any subsequent load of the
    // global SafepointSynchronize::_state flag is ordered after this load
    // of the thread-local polling word. We don't want this poll to
    // return false (i.e. not safepointing) and a later poll of the global
    // SafepointSynchronize::_state spuriously to return true.
    //
    // This is to avoid a race when we're in a native->Java transition
    // racing the code which wakes up from a safepoint.
    __ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */);
    __ lwu(t1, Address(xthread, JavaThread::suspend_flags_offset()));
    __ beqz(t1, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mv(c_rarg0, xthread);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ get_method(xmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_Java);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ld(t, Address(xthread, JavaThread::active_handles_offset()));
  __ sd(zr, Address(t, JNIHandleBlock::top_offset()));

  // If the result is an oop, unbox it and store it in the frame where GC
  // will see it and the result handler will pick it up

  {
    Label no_oop;
    __ la(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ bne(t, result_handler, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
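    // (A native method returns a jobject, i.e. a handle; resolve_jobject
    //  turns it into a raw oop, which is then parked in the frame's oop
    //  temp slot so the GC can find and update it until the result
    //  handler retrieves it.)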
    __ pop(ltos);
    __ resolve_jobject(x10, t, t1);
    __ sd(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lwu(t0, Address(xthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ addi(t1, zr, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
    __ bne(t0, t1, no_reguard);

    __ push_call_clobbered_registers();
    __ mv(c_rarg0, xthread);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ pop_call_clobbered_registers();
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(xmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // xbcp == code_base()
  __ ld(xbcp, Address(xmethod, Method::const_offset()));     // get ConstMethod*
  __ add(xbcp, xbcp, in_bytes(ConstMethod::codes_offset())); // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the sp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_SYNCHRONIZED));
    __ beqz(t0, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // monitor is expected in c_rarg1 for the slow unlock path
      __ la(c_rarg1, Address(fp,   // address of first monitor
                             (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                        wordSize - sizeof(BasicObjectLock))));

      __ ld(t, Address(c_rarg1, BasicObjectLock::obj_offset()));
      __ bnez(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  __ pop(ltos);
  __ pop(dtos);

  __ jalr(result_handler);

  // remove activation
  __ ld(esp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mv(sp, esp);

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

  // determine code generation flags
  const bool inc_counter = UseCompiler || CountCompiledCalls;

  // t0: sender sp
  address entry_point = __ pc();

  const Address constMethod(xmethod, Method::const_offset());
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x13,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ld(x13, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // x12: size of parameters

  __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
  __ sub(x13, x13, x12);                       // x13 = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // compute beginning of parameters (xlocals)
  __ shadd(xlocals, x12, esp, t1, 3);
  __ add(xlocals, xlocals, -wordSize);

  // Make room for additional locals
  __ slli(t1, x13, 3);
  __ sub(t0, esp, t1);

  // Padding between locals and fixed part of activation frame to ensure
  // SP is always 16-byte aligned.
  __ andi(sp, t0, -16);

  // x13 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ blez(x13, exit); // do nothing if x13 <= 0
    __ bind(loop);
    __ sd(zr, Address(t0));
    __ add(t0, t0, wordSize);
    __ add(x13, x13, -1); // until everything initialized
    __ bnez(x13, loop);
    __ bind(exit);
  }

  // And the base dispatch table
  __ get_dispatch();

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute native method as non-native");
  __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which has not been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(xthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mv(t1, true);
  __ sb(t1, do_not_unlock_if_synchronized);

  Label no_mdp;
  const Register mdp = x13;
  __ ld(mdp, Address(xmethod, Method::method_data_offset()));
  __ beqz(mdp, no_mdp);
  __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
  __ profile_parameters_type(mdp, x11, x12, x14); // use x11, x12, x14 as tmp registers
  __ bind(no_mdp);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ sb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    __ lwu(x10, access_flags);
    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
#endif
  }

  // start execution
#ifdef ASSERT
  __ verify_frame_setup();
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Method entry for java.lang.Thread.currentThread
address TemplateInterpreterGenerator::generate_currentThread() {
  address entry_point = __ pc();

  __ ld(x10, Address(xthread, JavaThread::vthread_offset()));
  __ resolve_oop_handle(x10, t0, t1);
  __ ret();

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  // x10: exception
  // x13: return address/pc that threw exception
  __ restore_bcp();    // xbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase(); // restore xheapbase as heapbase.
  __ get_dispatch();

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, xmethod may be corrupt.
  __ get_method(xmethod);
  // expression stack is undefined here
  // x10: exception
  // xbcp: exception bcp
  __ verify_oop(x10);
  __ mv(c_rarg1, x10);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(x13,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);

  // Restore machine SP
  __ restore_sp_after_call();

  // x10: exception handler entry point
  // x13: preserved exception oop
  // xbcp: bcp for exception handler
  __ push_ptr(x13); // push exception which is now the only value on the stack
  __ jr(x10);       // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still that of the instruction
  //       which caused the exception and the expression stack is
  //       empty. Thus, for any VM calls at this point, GC will find a legal
  //       oop map (with empty expression stack).

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ lwu(x13, Address(xthread, JavaThread::popframe_condition_offset()));
  __ ori(x13, x13, JavaThread::popframe_processing_bit);
  __ sw(x13, Address(xthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
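    //
    // (InterpreterRuntime::interpreter_contains(pc) below returns nonzero
    //  when the return PC lies within interpreter code, i.e. the caller is
    //  still interpreted and therefore cannot be a deoptimized frame.)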
    Label caller_not_deoptimized;
    __ ld(c_rarg1, Address(fp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), c_rarg1);
    __ bnez(x10, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(x10);
    __ ld(x10, Address(x10, Method::const_offset()));
    __ load_unsigned_short(x10, Address(x10, in_bytes(ConstMethod::
                                                      size_of_parameters_offset())));
    __ slli(x10, x10, Interpreter::logStackElementSize);
    __ restore_locals();
    __ sub(xlocals, xlocals, x10);
    __ add(xlocals, xlocals, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          xthread, x10, xlocals);

    __ remove_activation(vtos,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ mv(t0, JavaThread::popframe_force_deopt_reexecution_bit);
    __ sw(t0, Address(xthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret();

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Restore the last_sp and null it out
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);
  __ get_dispatch();

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ sw(zr, Address(xthread, JavaThread::popframe_condition_offset()));
  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ lbu(t0, Address(xbcp, 0));
    __ mv(t1, Bytecodes::_invokestatic);
    __ bne(t1, t0, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or null.

    __ ld(c_rarg0, Address(xlocals, 0));
    __ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, xmethod, xbcp);

    __ beqz(x10, L_done);

    __ sd(x10, Address(esp, 0));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Restore machine SP
  __ restore_sp_after_call();

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(x10);
  __ sd(x10, Address(xthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, false, true, false);
  // restore exception
  __ get_vm_result(x10, xthread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // x10: exception
  // ra: return address/pc that threw exception
  // sp: expression stack of caller
  // fp: fp of caller
  // FIXME: There's no point saving ra here because VM calls don't trash it
  __ sub(sp, sp, 2 * wordSize);
  __ sd(x10, Address(sp, 0));        // save exception
  __ sd(ra, Address(sp, wordSize));  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                         SharedRuntime::exception_handler_for_return_address),
                        xthread, ra);
  __ mv(x11, x10);                   // save exception handler
  __ ld(x10, Address(sp, 0));        // restore exception
  __ ld(ra, Address(sp, wordSize));  // restore return address
  __ add(sp, sp, 2 * wordSize);
  // We might be returning to a deopt handler that expects x13 to
  // contain the exception pc
  __ mv(x13, ra);
  // Note that an "issuing PC" is actually the next PC after the call
  __ jr(x11);                        // jump to exception
                                     // handler of caller
}

//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(t0, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
  __ sd(zr, cond_addr);

  __ remove_activation(state,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ ret();

  return entry;
}
// end of ForceEarlyReturn support

//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t != nullptr && t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ j(L);
  fep = __ pc();  __ push_f();    __ j(L);
  dep = __ pc();  __ push_d();    __ j(L);
  lep = __ pc();  __ push_l();    __ j(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
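  // All tos states funnel into the common label L: each non-vtos entry
  // first pushes its cached tos value onto the expression stack, so the
  // template body always starts with an empty tos cache. byte, char and
  // short share the int entry because they occupy int-sized stack slots.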
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push_reg(ra);
  __ push(state);
  __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ mv(c_rarg2, x10);  // Pass itos
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
  __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ pop(state);
  __ pop_reg(ra);
  __ ret();  // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ mv(x7, (address) &BytecodeCounter::_counter_value);
  __ atomic_addw(noreg, 1, x7);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
  __ atomic_addw(noreg, 1, x7);
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Calculate new index for counter:
  //   _index = (_index >> log2_number_of_codes) |
  //            (bytecode << log2_number_of_codes);
  Register index_addr = t1;
  Register index = t0;
  __ mv(index_addr, (address) &BytecodePairHistogram::_index);
  __ lw(index, index_addr);
  __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
  __ orrw(index, x7, index);
  __ sw(index, index_addr);
  // Bump bucket contents:
  //   _counters[_index]++;
  Register counter_addr = t1;
  __ mv(x7, (address) &BytecodePairHistogram::_counters);
  __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
  __ atomic_addw(noreg, 1, counter_addr);
}

void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated");
  __ jal(Interpreter::trace_code(t->tos_in()));
  __ reinit_heapbase();
}

void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ push_reg(t0);
  __ mv(t0, (address) &BytecodeCounter::_counter_value);
  __ ld(t0, Address(t0));
  __ mv(t1, StopInterpreterAt);
  __ bne(t0, t1, L);
  __ ebreak();
  __ bind(L);
  __ pop_reg(t0);
}

#endif // !PRODUCT
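
// Usage note: the non-product helpers above are driven by develop flags in
// debug builds -- e.g. -XX:+CountBytecodes feeds BytecodeCounter,
// -XX:+TraceBytecodes routes each bytecode through the trace stubs, and
// -XX:StopInterpreterAt=<n> makes stop_interpreter_at() hit the ebreak once
// the executed-bytecode count reaches n.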