/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/powerOfTwo.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with -XX:+PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;
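
// For example (sketch; PrintInterpreter is a diagnostic flag in current
// HotSpot builds, so it needs -XX:+UnlockDiagnosticVMOptions):
//
//   java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version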

#define __ _masm->

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andi(esp, esp, -16);
  __ mv(c_rarg3, esp);
  // xmethod
  // xlocals
  // c_rarg3: first stack arg - wordSize
  // adjust sp

  __ addi(sp, c_rarg3, -18 * wordSize);
  __ addi(sp, sp, -2 * wordSize);
  __ sd(ra, Address(sp, 0));

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             xmethod, xlocals, c_rarg3);

  // x10: result handler

  // Stack layout:
  // sp: return address           <- sp
  //      1 garbage
  //      8 integer args (if static first is unused)
  //      1 float/double identifiers
  //      8 double args
  //        stack args              <- esp
  //        garbage
  //        expression stack bottom
  //        bcp (null)
  //        ...

  // Restore ra
  __ ld(ra, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize);

  // Do FP first so we can use c_rarg3 as temp
  __ lwu(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = g_FPArgReg[i];
    Label d, done;

    __ test_bit(t0, c_rarg3, i);
    __ bnez(t0, d);
    __ flw(r, Address(sp, (10 + i) * wordSize));
    __ j(done);
    __ bind(d);
    __ fld(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here.  It will be loaded with the JNIEnv* later.
  for (int i = 1; i < Argument::n_int_register_parameters_c; i++) {
    const Register rm = g_INTArgReg[i];
    __ ld(rm, Address(sp, i * wordSize));
  }

  __ addi(sp, sp, 18 * wordSize);
  __ ret();

  return entry;
}
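
// Illustrative-only C sketch (not part of the build) of how the FP loop above
// consumes the float/double identifier word written by the slow signature
// handler: bit i set selects a 64-bit load (fld, a double) for FP argument i,
// bit i clear selects a 32-bit load (flw, a float).
//
//   for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
//     if (identifiers & (1u << i)) {
//       freg[i] = load_double(&slots[10 + i]);  // fld
//     } else {
//       freg[i] = load_float(&slots[10 + i]);   // flw
//     }
//   }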

// Various method entries
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // xmethod: Method*
  // x19_sender_sp: sender sp
  // esp: args

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack:
  //        [ arg ] <-- esp
  //        [ arg ]
  // retaddr in ra

  address fn = nullptr;
  address entry_point = nullptr;
  Register continuation = ra;
  switch (kind) {
    case Interpreter::java_lang_math_abs:
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ fabs_d(f10, f10);
      __ mv(sp, x19_sender_sp); // Restore caller's SP
      break;
    case Interpreter::java_lang_math_sqrt:
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ fsqrt_d(f10, f10);
      __ mv(sp, x19_sender_sp);
      break;
    case Interpreter::java_lang_math_sin :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      continuation = x9;  // The first callee-saved register
      if (StubRoutines::dsin() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
      }
      __ call(fn);
      break;
    case Interpreter::java_lang_math_cos :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      continuation = x9;  // The first callee-saved register
      if (StubRoutines::dcos() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
      }
      __ call(fn);
      break;
    case Interpreter::java_lang_math_tan :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      continuation = x9;  // The first callee-saved register
      if (StubRoutines::dtan() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
      }
      __ call(fn);
      break;
    case Interpreter::java_lang_math_log :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      continuation = x9;  // The first callee-saved register
      if (StubRoutines::dlog() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
      }
      __ call(fn);
      break;
    case Interpreter::java_lang_math_log10 :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      continuation = x9;  // The first callee-saved register
      if (StubRoutines::dlog10() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
      }
      __ call(fn);
      break;
    case Interpreter::java_lang_math_exp :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      continuation = x9;  // The first callee-saved register
      if (StubRoutines::dexp() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
      }
      __ call(fn);
      break;
    case Interpreter::java_lang_math_pow :
      entry_point = __ pc();
      __ mv(x9, ra);
      continuation = x9;
      __ fld(f10, Address(esp, 2 * Interpreter::stackElementSize));
      __ fld(f11, Address(esp));
      __ mv(sp, x19_sender_sp);
      if (StubRoutines::dpow() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
      }
      __ call(fn);
      break;
    case Interpreter::java_lang_math_fmaD :
      if (UseFMA) {
        entry_point = __ pc();
        __ fld(f10, Address(esp, 4 * Interpreter::stackElementSize));
        __ fld(f11, Address(esp, 2 * Interpreter::stackElementSize));
        __ fld(f12, Address(esp));
        __ fmadd_d(f10, f10, f11, f12);
        __ mv(sp, x19_sender_sp); // Restore caller's SP
      }
      break;
    case Interpreter::java_lang_math_fmaF :
      if (UseFMA) {
        entry_point = __ pc();
        __ flw(f10, Address(esp, 2 * Interpreter::stackElementSize));
        __ flw(f11, Address(esp, Interpreter::stackElementSize));
        __ flw(f12, Address(esp));
        __ fmadd_s(f10, f10, f11, f12);
        __ mv(sp, x19_sender_sp); // Restore caller's SP
      }
      break;
    default:
      ;
  }
  if (entry_point != nullptr) {
    __ jr(continuation);
  }

  return entry_point;
}
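
// Common shape of the entries generated above, as a sketch:
//
//   entry:  f10 = *esp;            // pop FP argument(s)
//           sp  = x19_sender_sp;   // restore caller's SP
//           // either an inline op (fabs_d/fsqrt_d/fmadd), or:
//           x9  = ra;              // save return address across the call
//           call(stub or SharedRuntime fallback);  // result left in f10
//           jr(continuation);      // ra, or x9 when we called out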

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  // xmethod: Method*
  // x19_sender_sp: sender SP

  address entry_point = __ pc();

  // abstract method entry

  //  pop return address, reset last_sp to null
  __ empty_expression_stack();
  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
                                     xmethod);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ld(t0, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ shadd(t0, t0, fp, t0, LogBytesPerWord);
    // maximal sp for current fp (stack grows negative)
    // check if frame is complete
    __ bge(t0, sp, L);
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters

  // convention: expect aberrant index in register x11
  __ zero_extend(c_rarg2, x11, 32);
  // convention: expect array in register x13
  __ mv(c_rarg1, x13);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop_reg(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
  const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == nullptr, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop_reg(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ la(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(x10, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // ExternalAddress can't take null (external_word_Relocation would
    // assert), so materialize a null message by hand.
    if (message != nullptr) {
      __ la(c_rarg2, Address((address)message));
    } else {
      __ mv(c_rarg2, NULL_WORD);
    }
    __ call_VM(x10,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ j(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // and null it as marker that esp is now tos until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);

  if (state == atos) {
    Register obj = x10;
    Register mdp = x11;
    Register tmp = x12;
    __ ld(mdp, Address(xmethod, Method::method_data_offset()));
    __ profile_return_type(mdp, obj, tmp);
  }

  const Register cache = x11;
  const Register index = x12;

  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
    __ shadd(esp, cache, esp, t0, 3);
  } else {
    // Pop N words from the stack
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));

    __ shadd(esp, cache, esp, t0, 3);
  }

  // Restore machine SP
  __ restore_sp_after_call();

  __ check_and_handle_popframe(xthread);
  __ check_and_handle_earlyret(xthread);

  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step,
                                                               address continuation) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);
  __ get_dispatch();

  __ restore_sp_after_call();  // Restore SP to extended SP

  // Restore expression stack pointer
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // null last_sp until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  // handle exceptions
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  if (type == T_OBJECT) {
    // retrieve result from frame
    __ ld(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // and verify it
    __ verify_oop(x10);
  } else {
    __ cast_primitive_type(type, x10);
  }

  __ ret();                                  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
                                                                address runtime_entry) {
  assert_cond(runtime_entry != nullptr);
  address entry = __ pc();
  __ push(state);
  __ push_cont_fastpath(xthread);
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath(xthread);
  __ membar(MacroAssembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various types of method entries.

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// xmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ ld(x10, Address(xmethod, Method::method_data_offset()));
    __ beqz(x10, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(x10, in_bytes(MethodData::invocation_counter_offset()) +
                                         in_bytes(InvocationCounter::counter_offset()));
    const Address mask(x10, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, t0, t1, false, overflow);
    __ j(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(t1,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  __ get_method_counters(xmethod, t1, done);
  const Address mask(t1, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, increment, mask, t0, x11, false, overflow);
  __ bind(done);
}
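
// Rough C sketch (an assumption about increment_mask_and_jump's semantics,
// shown for illustration only) of the masked overflow test driven above:
//
//   counter += InvocationCounter::count_increment;
//   if ((counter & mask) == 0) {
//     goto overflow;   // mask comes from the MDO or from MethodCounters
//   }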

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  __ mv(c_rarg1, zr);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), c_rarg1);
  __ j(do_continue);
}

// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this wasn't
// obvious in generate_method_entry), the guard should work for them
// too.
//
// Args:
//      x13: number of additional locals this frame needs (what we must check)
//      xmethod: Method*
//
// Kills:
//      x10
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_riscv.hpp
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  // total overhead size: entry_size + (saved fp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = (int)os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ mv(t0, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ bleu(x13, t0, after_frame_check);

  // compute sp as if this were going to be the last frame on
  // the stack before the red zone

  // locals + overhead, in bytes
  __ mv(x10, overhead_size);
  __ shadd(x10, x13, x10, t0, Interpreter::logStackElementSize);  // 2 slots per parameter.

  const Address stack_limit(xthread, JavaThread::stack_overflow_limit_offset());
  __ ld(t0, stack_limit);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack limit is non-zero.
  __ bnez(t0, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add stack limit to locals.
  __ add(x10, x10, t0);

  // Check against the current stack bottom.
  __ bgtu(sp, x10, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller.  This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind.  The ANDI should be
  // unnecessary because the sender SP in x19 is always aligned, but
  // it doesn't hurt.
  __ andi(sp, x19_sender_sp, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}
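
// The check above, as C-like pseudocode (a sketch, not authoritative):
//
//   if (n_extra_locals <= (page_size - overhead_size) / stackElementSize)
//     return;                                // fits in one page; guard page suffices
//   needed = overhead_size + n_extra_locals * stackElementSize;
//   if (sp > stack_overflow_limit + needed)
//     return;                                // enough headroom
//   sp = sender_sp & ~15;                    // leave an unwindable call stack
//   throw_StackOverflowError();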

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      xmethod: Method*
//      xlocals: locals
//
// Kills:
//      x10
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      t0, t1 (temporary regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method doesn't need synchronization", false);
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ lwu(x10, access_flags);
    __ andi(t0, x10, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ld(x10, Address(xlocals, Interpreter::local_offset_in_bytes(0)));
    __ beqz(t0, done);
    __ load_mirror(x10, xmethod, x15, t1);

#ifdef ASSERT
    {
      Label L;
      __ bnez(x10, L);
      __ stop("synchronization object is null");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ check_extended_sp();
  __ add(sp, sp, -entry_size); // add space for a monitor entry
  __ add(esp, esp, -entry_size);
  __ sub(t0, sp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ sub(t0, esp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, monitor_block_top);  // set new monitor block top
  // store object
  __ sd(x10, Address(esp, BasicObjectLock::obj_offset()));
  __ mv(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}
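
// Net effect of lock_method (a sketch derived from the stores above): one
// BasicObjectLock is carved out below the current expression stack,
//
//   sp  -= entry_size;  esp -= entry_size;
//   monitor = (BasicObjectLock*) esp;
//   monitor->obj = receiver_or_mirror;   // x10, chosen by JVM_ACC_STATIC
//   lock_object(monitor);
//
// with the new extended SP and monitor block top recorded in the frame in
// relativized (fp-relative, word-scaled) form.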

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      ra: return address
//      xmethod: Method*
//      xlocals: pointer to locals
//      xcpool: cp cache
//      stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ add(esp, sp, -14 * wordSize);
    __ mv(xbcp, zr);
    __ add(sp, sp, -14 * wordSize);
    // add 2 zero-initialized slots for native calls
    __ sd(zr, Address(sp, 13 * wordSize));
    __ sd(zr, Address(sp, 12 * wordSize));
  } else {
    __ add(esp, sp, -12 * wordSize);
    __ ld(t0, Address(xmethod, Method::const_offset()));     // get ConstMethod
    __ add(xbcp, t0, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ add(sp, sp, -12 * wordSize);
  }
  __ sd(xbcp, Address(sp, wordSize));
  __ mv(t0, frame::interpreter_frame_initial_sp_offset);
  __ sd(t0, Address(sp, 0));

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ld(t0, Address(xmethod, Method::method_data_offset()));
    __ beqz(t0, method_data_continue);
    __ la(t0, Address(t0, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
  }

  __ sd(xmethod, Address(sp, 7 * wordSize));
  __ sd(ProfileInterpreter ? t0 : zr, Address(sp, 6 * wordSize));

  __ sd(ra, Address(sp, 11 * wordSize));
  __ sd(fp, Address(sp, 10 * wordSize));
  __ la(fp, Address(sp, 12 * wordSize)); // include ra & fp

  __ ld(xcpool, Address(xmethod, Method::const_offset()));
  __ ld(xcpool, Address(xcpool, ConstMethod::constants_offset()));
  __ ld(xcpool, Address(xcpool, ConstantPool::cache_offset()));
  __ sd(xcpool, Address(sp, 3 * wordSize));
  __ sub(t0, xlocals, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);   // t0 = (xlocals - fp) >> logStackElementSize
  // Store relativized xlocals, see frame::interpreter_frame_locals().
  __ sd(t0, Address(sp, 2 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ sd(x19_sender_sp, Address(sp, 9 * wordSize));
  __ sd(zr, Address(sp, 8 * wordSize));

  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(t2, xmethod, x15, t1);
  __ sd(t2, Address(sp, 4 * wordSize));

  if (!native_call) {
    __ ld(t0, Address(xmethod, Method::const_offset()));
    __ lhu(t0, Address(t0, ConstMethod::max_stack_offset()));
    __ add(t0, t0, MAX2(3, Method::extra_stack_entries()));
    __ slli(t0, t0, 3);
    __ sub(t0, sp, t0);
    __ andi(t0, t0, -16);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    // Store extended SP
    __ sd(t1, Address(sp, 5 * wordSize));
    // Move SP out of the way
    __ mv(sp, t0);
  } else {
    // Make sure there is room for the exception oop pushed in case method throws
    // an exception (see TemplateInterpreterGenerator::generate_throw_exception())
    __ sub(t0, sp, 2 * wordSize);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    __ sd(t1, Address(sp, 5 * wordSize));
    __ mv(sp, t0);
  }
}
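
// Summary of the word offsets (from the adjusted sp) written above; this is
// a restatement of the stores in this function, not an independent spec:
//
//   11: ra                        10: saved fp             9: sender sp
//    8: last_sp (null)             7: Method*              6: mdp or null
//    5: extended SP (relativized)  4: mirror               3: cp cache
//    2: locals (relativized)       1: bcp (zr for native)  0: initial sp offset marker
//
// Native frames reserve 14 words instead of 12, zero-initializing slots 12/13.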

// End of helpers

//------------------------------------------------------------------------------------------------------------------------
// Various method entries

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.ref.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // xmethod: Method*
  // x19_sender_sp: senderSP must be preserved for slow path, set SP to it on fast path

  // ra is live.  It must be saved around calls.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  const Register local_0 = c_rarg0;
  // Check if local 0 isn't null
  // If the receiver is null then it is OK to jump to the slow path.
  __ ld(local_0, Address(esp, 0));
  __ beqz(local_0, slow_path);

  // Load the value of the referent field.
  const Address field_address(local_0, referent_offset);
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->load_at(_masm, IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ t0, /*tmp2*/ t1);

  // areturn
  __ andi(sp, x19_sender_sp, -16);  // done with stack
  __ ret();

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}
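
// In Java terms the fast path above implements, roughly:
//
//   public T get() { return this.referent; }   // load via ON_WEAK_OOP_REF barrier
//
// with a null receiver (and everything else) handled by falling back to the
// normal zerolocals entry.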

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  // TODO: Unimplemented generate_CRC32_update_entry
  return nullptr;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32_updateBytes_entry
  return nullptr;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32C_updateBytes_entry
  return nullptr;
}

// Not supported
address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // See more discussion in stackOverflow.hpp.

  const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
  const int page_size = (int)os::vm_page_size();
  const int n_shadow_pages = shadow_zone_size / page_size;

#ifdef ASSERT
  Label L_good_limit;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bnez(t0, L_good_limit);
  __ stop("shadow zone safe limit is not initialized");
  __ bind(L_good_limit);

  Label L_good_watermark;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bnez(t0, L_good_watermark);
  __ stop("shadow zone growth watermark is not initialized");
  __ bind(L_good_watermark);
#endif

  Label L_done;

  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bgtu(sp, t0, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p * page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bleu(sp, t0, L_done);
  __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}
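
// Banging policy above, as pseudocode (a sketch; assumes
// bang_stack_with_offset(off) touches the word at sp - off):
//
//   if (sp > shadow_zone_growth_watermark) return;   // already banged this deep
//   for (int p = 1; p <= n_shadow_pages; p++)
//     touch(sp - p * page_size);                     // bang_stack_with_offset
//   if (sp > shadow_zone_safe_limit)
//     shadow_zone_growth_watermark = sp;             // record new watermark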

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // x11: Method*
  // x30: sender sp

  address entry_point = __ pc();

  const Address constMethod       (xmethod, Method::const_offset());
  const Address access_flags      (xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x12, ConstMethod::
                                   size_of_parameters_offset());

  // get parameter size (always needed)
  __ ld(x12, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack.

  // xmethod: Method*
  // x12: size of parameters
  // x30: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (xlocals)
  __ shadd(xlocals, x12, esp, xlocals, 3);
  __ addi(xlocals, xlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andi(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute non-native method as native", false);
  __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of a synchronized method
  // which has not been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(xthread,
                                              in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mv(t1, true);
  __ sb(t1, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ sb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    __ lwu(x10, access_flags);
    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
#endif
  }

  // start execution
#ifdef ASSERT
  __ verify_frame_setup();
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = x18;
  const Register result_handler = x19;

  // allocate space for parameters
  __ ld(t, Address(xmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ slli(t, t, Interpreter::logStackElementSize);
  __ sub(x30, esp, t);
  __ andi(sp, x30, -16);
  __ mv(esp, x30);

  // get signature handler
  {
    Label L;
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bnez(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == xlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t0,
         "adjust this code");

  // The generated handlers do not touch xmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ jalr(t);
  __ get_method(xmethod);        // slow path can do a GC, reload xmethod

  // result handler is in x10
  // set result handler
  __ mv(result_handler, x10);
  // pass mirror handle if static call
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_STATIC));
    __ beqz(t0, L);
    // get mirror
    __ load_mirror(t, xmethod, x28, t1);
    // copy mirror into activation frame
    __ sd(t, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ addi(c_rarg1, fp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in x28
  {
    Label L;
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ mv(t, unsatisfied);
    __ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8

    __ bne(x28, t1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ get_method(xmethod);
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, xthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  Label native_return;
  __ set_last_Java_frame(esp, fp, native_return, x30);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ lwu(t, Address(xthread, JavaThread::thread_state_offset()));
    __ addi(t0, zr, (u1)_thread_in_Java);
    __ beq(t, t0, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
  __ mv(t0, _thread_in_native);
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
  __ sw(t0, Address(t1));

  // Call the native method.
  __ jalr(x28);
  __ bind(native_return);
  __ get_method(xmethod);
  // result potentially in x10 or f10

  // make room for the pushes we're about to do
  __ sub(t0, esp, 4 * wordSize);
  __ andi(sp, t0, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack, then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_native_trans);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ membar(MacroAssembler::AnyAny);
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label L, Continue;

    // We need an acquire here to ensure that any subsequent load of the
    // global SafepointSynchronize::_state flag is ordered after this load
    // of the thread-local polling word. We don't want this poll to
    // return false (i.e. not safepointing) and a later poll of the global
    // SafepointSynchronize::_state to spuriously return true.
    //
    // This is to avoid a race when we're in a native->Java transition
    // racing the code which wakes up from a safepoint.
    __ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */);
    __ lwu(t1, Address(xthread, JavaThread::suspend_flags_offset()));
    __ beqz(t1, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mv(c_rarg0, xthread);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ get_method(xmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }
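
  // The native->Java transition above, as pseudocode (sketch):
  //
  //   thread->state = _thread_in_native_trans;
  //   full_fence();                                  // unless UseSystemMemoryBarrier
  //   if (safepoint_poll(/*acquire*/ true) || thread->suspend_flags != 0) {
  //     // runtime call done by hand so a pending exception doesn't divert us
  //     JavaThread::check_special_condition_for_native_trans(thread);
  //   }
  //   thread->state = _thread_in_Java;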

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_Java);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ld(t, Address(xthread, JavaThread::active_handles_offset()));
  __ sd(zr, Address(t, JNIHandleBlock::top_offset()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop;
    __ la(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ bne(t, result_handler, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
    __ pop(ltos);
    __ resolve_jobject(x10, t, t1);
    __ sd(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lwu(t0, Address(xthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ addi(t1, zr, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
    __ bne(t0, t1, no_reguard);

    __ push_call_clobbered_registers();
    __ mv(c_rarg0, xthread);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ pop_call_clobbered_registers();
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here.  Also we can't call_VM until the bcp has been
  // restored.  We need bcp for throwing the exception below, so get it now.
  __ get_method(xmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // xbcp == code_base()
  __ ld(xbcp, Address(xmethod, Method::const_offset()));   // get ConstMethod*
  __ add(xbcp, xbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the sp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_SYNCHRONIZED));
    __ beqz(t0, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // the monitor is expected in c_rarg1 for the slow unlock path
      __ la(c_rarg1, Address(fp,   // address of first monitor
                             (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                        wordSize - sizeof(BasicObjectLock))));

      __ ld(t, Address(c_rarg1, BasicObjectLock::obj_offset()));
      __ bnez(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  __ pop(ltos);
  __ pop(dtos);

  __ jalr(result_handler);

  // remove activation
  __ ld(esp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mv(sp, esp);

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

  // determine code generation flags
  const bool inc_counter  = UseCompiler || CountCompiledCalls;

  // t0: sender sp
  address entry_point = __ pc();

  const Address constMethod(xmethod, Method::const_offset());
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x13,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ld(x13, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // x12: size of parameters

  __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
  __ sub(x13, x13, x12); // x13 = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // compute beginning of parameters (xlocals)
  __ shadd(xlocals, x12, esp, t1, 3);
  __ add(xlocals, xlocals, -wordSize);

  // Make room for additional locals
  __ slli(t1, x13, 3);
  __ sub(t0, esp, t1);

  // Padding between locals and fixed part of activation frame to ensure
  // SP is always 16-byte aligned.
  __ andi(sp, t0, -16);

  // x13 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ blez(x13, exit); // do nothing if x13 <= 0
    __ bind(loop);
    __ sd(zr, Address(t0));
    __ add(t0, t0, wordSize);
    __ add(x13, x13, -1); // until everything initialized
    __ bnez(x13, loop);
    __ bind(exit);
  }
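
  // Equivalent C for the zero-initialization loop above (sketch):
  //
  //   for (intptr_t* p = (intptr_t*) t0; n_extra_locals > 0; n_extra_locals--) {
  //     *p++ = 0;
  //   }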
1405 
1406   // And the base dispatch table
1407   __ get_dispatch();
1408 
1409   // initialize fixed part of activation frame
1410   generate_fixed_frame(false);
1411 
1412   // make sure method is not native & not abstract
1413 #ifdef ASSERT
1414   __ lwu(x10, access_flags);
1415   __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute native method as non-native");
1416   __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
1417 #endif
1418 
1419   // Since at this point in the method invocation the exception
1420   // handler would try to exit the monitor of synchronized methods
1421   // which hasn't been entered yet, we set the thread local variable
1422   // _do_not_unlock_if_synchronized to true. The remove_activation
1423   // will check this flag.
1424 
1425   const Address do_not_unlock_if_synchronized(xthread,
1426                                               in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1427   __ mv(t1, true);
1428   __ sb(t1, do_not_unlock_if_synchronized);
1429 
1430   Label no_mdp;
1431   const Register mdp = x13;
1432   __ ld(mdp, Address(xmethod, Method::method_data_offset()));
1433   __ beqz(mdp, no_mdp);
1434   __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
1435   __ profile_parameters_type(mdp, x11, x12, x14); // use x11, x12, x14 as tmp registers
1436   __ bind(no_mdp);
1437 
1438   // increment invocation count & check for overflow
1439   Label invocation_counter_overflow;
1440   if (inc_counter) {
1441     generate_counter_incr(&invocation_counter_overflow);
1442   }
1443 
1444   Label continue_after_compile;
1445   __ bind(continue_after_compile);
1446 
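  // Touch the stack shadow pages now so that a stack overflow, if any,
  // is triggered here rather than somewhere inside the method body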
1447   bang_stack_shadow_pages(false);
1448 
1449   // reset the _do_not_unlock_if_synchronized flag
1450   __ sb(zr, do_not_unlock_if_synchronized);
1451 
1452   // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
1455   if (synchronized) {
1456     // Allocate monitor and lock method
1457     lock_method();
1458   } else {
1459     // no synchronization necessary
1460 #ifdef ASSERT
1461     __ lwu(x10, access_flags);
1462     __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
1463 #endif
1464   }
1465 
1466   // start execution
1467 #ifdef ASSERT
1468   __ verify_frame_setup();
1469 #endif
1470 
1471   // jvmti support
1472   __ notify_method_entry();
1473 
1474   __ dispatch_next(vtos);
1475 
1476   // invocation counter overflow
1477   if (inc_counter) {
1478     // Handle overflow of counter and compile method
1479     __ bind(invocation_counter_overflow);
1480     generate_counter_overflow(continue_after_compile);
1481   }
1482 
1483   return entry_point;
1484 }
1485 
1486 // Method entry for java.lang.Thread.currentThread
1487 address TemplateInterpreterGenerator::generate_currentThread() {
1488   address entry_point = __ pc();
1489 
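  // Load the vthread OopHandle from the current JavaThread and resolve it
  // to the java.lang.Thread oop in x10, the result register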
1490   __ ld(x10, Address(xthread, JavaThread::vthread_offset()));
1491   __ resolve_oop_handle(x10, t0, t1);
1492   __ ret();
1493 
1494   return entry_point;
1495 }
1496 
1497 //-----------------------------------------------------------------------------
1498 // Exceptions
1499 
1500 void TemplateInterpreterGenerator::generate_throw_exception() {
1501   // Entry point in previous activation (i.e., if the caller was
1502   // interpreted)
1503   Interpreter::_rethrow_exception_entry = __ pc();
  // Clear interpreter_frame_last_sp, since we are going to empty the
  // expression stack for the exception processing anyway.
1506   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
1507   // x10: exception
1508   // x13: return address/pc that threw exception
1509   __ restore_bcp();    // xbcp points to call/send
1510   __ restore_locals();
1511   __ restore_constant_pool_cache();
1512   __ reinit_heapbase();  // restore xheapbase as heapbase.
1513   __ get_dispatch();
1514 
1515   // Entry point for exceptions thrown within interpreter code
1516   Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, xmethod may be corrupt.
1519   __ get_method(xmethod);
1520   // expression stack is undefined here
1521   // x10: exception
1522   // xbcp: exception bcp
1523   __ verify_oop(x10);
1524   __ mv(c_rarg1, x10);
1525 
1526   // expression stack must be empty before entering the VM in case of
1527   // an exception
1528   __ empty_expression_stack();
1529   // find exception handler address and preserve exception oop
1530   __ call_VM(x13,
1531              CAST_FROM_FN_PTR(address,
1532                           InterpreterRuntime::exception_handler_for_exception),
1533              c_rarg1);
1534 
1535   // Restore machine SP
1536   __ restore_sp_after_call();
1537 
1538   // x10: exception handler entry point
1539   // x13: preserved exception oop
1540   // xbcp: bcp for exception handler
1541   __ push_ptr(x13); // push exception which is now the only value on the stack
1542   __ jr(x10); // jump to exception handler (may be _remove_activation_entry!)
1543 
1544   // If the exception is not handled in the current frame the frame is
1545   // removed and the exception is rethrown (i.e. exception
1546   // continuation is _rethrow_exception).
1547   //
  // Note: At this point the bci still refers to the instruction
1549   // which caused the exception and the expression stack is
1550   // empty. Thus, for any VM calls at this point, GC will find a legal
1551   // oop map (with empty expression stack).
1552 
1553   //
1554   // JVMTI PopFrame support
1555   //
1556 
1557   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1558   __ empty_expression_stack();
1559   // Set the popframe_processing bit in pending_popframe_condition
1560   // indicating that we are currently handling popframe, so that
1561   // call_VMs that may happen later do not trigger new popframe
1562   // handling cycles.
1563   __ lwu(x13, Address(xthread, JavaThread::popframe_condition_offset()));
1564   __ ori(x13, x13, JavaThread::popframe_processing_bit);
1565   __ sw(x13, Address(xthread, JavaThread::popframe_condition_offset()));
1566 
1567   {
1568     // Check to see whether we are returning to a deoptimized frame.
1569     // (The PopFrame call ensures that the caller of the popped frame is
1570     // either interpreted or compiled and deoptimizes it if compiled.)
1571     // In this case, we can't call dispatch_next() after the frame is
1572     // popped, but instead must save the incoming arguments and restore
1573     // them after deoptimization has occurred.
1574     //
1575     // Note that we don't compare the return PC against the
1576     // deoptimization blob's unpack entry because of the presence of
1577     // adapter frames in C2.
1578     Label caller_not_deoptimized;
1579     __ ld(c_rarg1, Address(fp, frame::return_addr_offset * wordSize));
1580     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), c_rarg1);
1581     __ bnez(x10, caller_not_deoptimized);
1582 
1583     // Compute size of arguments for saving when returning to
1584     // deoptimized caller
1585     __ get_method(x10);
1586     __ ld(x10, Address(x10, Method::const_offset()));
1587     __ load_unsigned_short(x10, Address(x10, in_bytes(ConstMethod::
1588                                                       size_of_parameters_offset())));
1589     __ slli(x10, x10, Interpreter::logStackElementSize);
1590     __ restore_locals();
1591     __ sub(xlocals, xlocals, x10);
1592     __ add(xlocals, xlocals, wordSize);
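    // xlocals now points to the lowest-addressed parameter slot; together
    // with the byte size in x10 it bounds the argument block to preserve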
1593     // Save these arguments
1594     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1595                                            Deoptimization::
1596                                            popframe_preserve_args),
1597                           xthread, x10, xlocals);
1598 
1599     __ remove_activation(vtos,
1600                          /* throw_monitor_exception */ false,
1601                          /* install_monitor_exception */ false,
1602                          /* notify_jvmdi */ false);
1603 
1604     // Inform deoptimization that it is responsible for restoring
1605     // these arguments
1606     __ mv(t0, JavaThread::popframe_force_deopt_reexecution_bit);
1607     __ sw(t0, Address(xthread, JavaThread::popframe_condition_offset()));
1608 
1609     // Continue in deoptimization handler
1610     __ ret();
1611 
1612     __ bind(caller_not_deoptimized);
1613   }
1614 
1615   __ remove_activation(vtos,
1616                        /* throw_monitor_exception */ false,
1617                        /* install_monitor_exception */ false,
1618                        /* notify_jvmdi */ false);
1619 
1620   // Restore the last_sp and null it out
1621   __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
1623   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
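  // last_sp is saved as a word offset relative to fp, so the absolute
  // expression stack pointer is esp = fp + offset * wordSize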
1624 
1625   __ restore_bcp();
1626   __ restore_locals();
1627   __ restore_constant_pool_cache();
1628   __ get_method(xmethod);
1629   __ get_dispatch();
1630 
1631   // The method data pointer was incremented already during
1632   // call profiling. We have to restore the mdp for the current bcp.
1633   if (ProfileInterpreter) {
1634     __ set_method_data_pointer_for_bcp();
1635   }
1636 
1637   // Clear the popframe condition flag
1638   __ sw(zr, Address(xthread, JavaThread::popframe_condition_offset()));
1639   assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
1640 
1641 #if INCLUDE_JVMTI
1642   {
1643     Label L_done;
1644 
1645     __ lbu(t0, Address(xbcp, 0));
1646     __ mv(t1, Bytecodes::_invokestatic);
1647     __ bne(t1, t0, L_done);
1648 
1649     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
1651 
1652     __ ld(c_rarg0, Address(xlocals, 0));
    __ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, xmethod, xbcp);
1654 
1655     __ beqz(x10, L_done);
1656 
1657     __ sd(x10, Address(esp, 0));
1658     __ bind(L_done);
1659   }
1660 #endif // INCLUDE_JVMTI
1661 
1662   // Restore machine SP
1663   __ restore_sp_after_call();
1664 
1665   __ dispatch_next(vtos);
1666   // end of PopFrame support
1667 
1668   Interpreter::_remove_activation_entry = __ pc();
1669 
1670   // preserve exception over this code sequence
1671   __ pop_ptr(x10);
1672   __ sd(x10, Address(xthread, JavaThread::vm_result_offset()));
1673   // remove the activation (without doing throws on illegalMonitorExceptions)
1674   __ remove_activation(vtos, false, true, false);
1675   // restore exception
1676   __ get_vm_result(x10, xthread);
1677 
1678   // In between activations - previous activation type unknown yet
1679   // compute continuation point - the continuation point expects the
1680   // following registers set up:
1681   //
1682   // x10: exception
1683   // ra: return address/pc that threw exception
1684   // sp: expression stack of caller
1685   // fp: fp of caller
1686   // FIXME: There's no point saving ra here because VM calls don't trash it
1687   __ sub(sp, sp, 2 * wordSize);
1688   __ sd(x10, Address(sp, 0));                   // save exception
1689   __ sd(ra, Address(sp, wordSize));             // save return address
1690   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1691                                          SharedRuntime::exception_handler_for_return_address),
1692                         xthread, ra);
1693   __ mv(x11, x10);                              // save exception handler
1694   __ ld(x10, Address(sp, 0));                   // restore exception
1695   __ ld(ra, Address(sp, wordSize));             // restore return address
1696   __ add(sp, sp, 2 * wordSize);
1697   // We might be returning to a deopt handler that expects x13 to
1698   // contain the exception pc
1699   __ mv(x13, ra);
1700   // Note that an "issuing PC" is actually the next PC after the call
1701   __ jr(x11);                                   // jump to exception
1702                                                 // handler of caller
1703 }
1704 
1705 //
1706 // JVMTI ForceEarlyReturn support
1707 //
1708 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state)  {
1709   address entry = __ pc();
1710 
1711   __ restore_bcp();
1712   __ restore_locals();
1713   __ empty_expression_stack();
1714   __ load_earlyret_value(state);
1715 
1716   __ ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
1717   Address cond_addr(t0, JvmtiThreadState::earlyret_state_offset());
1718 
1719   // Clear the earlyret state
1720   assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
1721   __ sd(zr, cond_addr);
1722 
1723   __ remove_activation(state,
1724                        false, /* throw_monitor_exception */
1725                        false, /* install_monitor_exception */
1726                        true); /* notify_jvmdi */
1727   __ ret();
1728 
1729   return entry;
1730 }
1731 // end of ForceEarlyReturn support
1732 
1733 //-----------------------------------------------------------------------------
1734 // Helper for vtos entry point generation
1735 
1736 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
1737                                                          address& bep,
1738                                                          address& cep,
1739                                                          address& sep,
1740                                                          address& aep,
1741                                                          address& iep,
1742                                                          address& lep,
1743                                                          address& fep,
1744                                                          address& dep,
1745                                                          address& vep) {
1746   assert(t != nullptr && t->is_valid() && t->tos_in() == vtos, "illegal template");
1747   Label L;
1748   aep = __ pc();  __ push_ptr();  __ j(L);
1749   fep = __ pc();  __ push_f();    __ j(L);
1750   dep = __ pc();  __ push_d();    __ j(L);
1751   lep = __ pc();  __ push_l();    __ j(L);
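  // byte, char and short values are int-sized on the operand stack,
  // so their entry points share the int push below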
1752   bep = cep = sep =
1753   iep = __ pc();  __ push_i();
1754   vep = __ pc();
1755   __ bind(L);
1756   generate_and_dispatch(t);
1757 }
1758 
1759 //-----------------------------------------------------------------------------
1760 
1761 // Non-product code
1762 #ifndef PRODUCT
1763 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1764   address entry = __ pc();
1765 
1766   __ push_reg(ra);
1767   __ push(state);
1768   __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
1769   __ mv(c_rarg2, x10);  // Pass itos
1770   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
1771   __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
1772   __ pop(state);
1773   __ pop_reg(ra);
1774   __ ret();                                   // return from result handler
1775 
1776   return entry;
1777 }
1778 
1779 void TemplateInterpreterGenerator::count_bytecode() {
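  // Atomically bump the global 32-bit bytecode counter in place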
1780   __ mv(x7, (address) &BytecodeCounter::_counter_value);
1781   __ atomic_addw(noreg, 1, x7);
1782 }
1783 
1784 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1785   __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
1786   __ atomic_addw(noreg, 1, x7);
1787 }
1788 
1789 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1790   // Calculate new index for counter:
1791   //   _index = (_index >> log2_number_of_codes) |
1792   //            (bytecode << log2_number_of_codes);
1793   Register index_addr = t1;
1794   Register index = t0;
1795   __ mv(index_addr, (address) &BytecodePairHistogram::_index);
1796   __ lw(index, index_addr);
1797   __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1798   __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
1799   __ orrw(index, x7, index);
1800   __ sw(index, index_addr);
1801   // Bump bucket contents:
1802   //   _counters[_index] ++;
1803   Register counter_addr = t1;
1804   __ mv(x7, (address) &BytecodePairHistogram::_counters);
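  // counter_addr = x7 + (index << LogBytesPerInt), i.e. &_counters[index]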
1805   __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
1806   __ atomic_addw(noreg, 1, counter_addr);
}
1808 
1809 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
1813 
1814   assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated");
1815   __ jal(Interpreter::trace_code(t->tos_in()));
1816   __ reinit_heapbase();
1817 }
1818 
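// Debugging aid: trap to the debugger (ebreak) once the global bytecode
// counter reaches the value of -XX:StopInterpreterAt.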
1819 void TemplateInterpreterGenerator::stop_interpreter_at() {
1820   Label L;
1821   __ push_reg(t0);
1822   __ mv(t0, (address) &BytecodeCounter::_counter_value);
1823   __ ld(t0, Address(t0));
1824   __ mv(t1, StopInterpreterAt);
1825   __ bne(t0, t1, L);
1826   __ ebreak();
1827   __ bind(L);
1828   __ pop_reg(t0);
1829 }
1830 
1831 #endif // !PRODUCT