/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/powerOfTwo.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
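// Note: the '__' shorthand routes each emitted instruction through
// Disassembler::hook so that generated interpreter code can be annotated
// with its C++ source location when disassembly output is enabled
// (e.g. with -XX:+PrintInterpreter).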

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andi(esp, esp, -16);
  __ mv(c_rarg3, esp);
  // xmethod
  // xlocals
  // c_rarg3: first stack arg - wordSize
  // adjust sp

  __ addi(sp, c_rarg3, -18 * wordSize);
  __ addi(sp, sp, -2 * wordSize);
  __ sd(ra, Address(sp, 0));

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             xmethod, xlocals, c_rarg3);

  // x10: result handler

  // Stack layout:
  // sp: return address           <- sp
  //      1 garbage
  //      8 integer args (if static, first is unused)
  //      1 float/double identifier word
  //      8 double args
  //        stack args              <- esp
  //        garbage
  //        expression stack bottom
  //        bcp (null)
  //        ...
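  //
  // Illustrative example: for a native method taking (int, double), the
  // handler leaves the int in one of the integer-arg slots and the double
  // in one of the double-arg slots, with that argument's bit set in the
  // identifier word so the loop below issues fld rather than flw for it.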

  // Restore ra
  __ ld(ra, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize);

  // Do FP first so we can use c_rarg3 as temp
  __ lwu(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = g_FPArgReg[i];
    Label d, done;

    __ test_bit(t0, c_rarg3, i);
    __ bnez(t0, d);
    __ flw(r, Address(sp, (10 + i) * wordSize));
    __ j(done);
    __ bind(d);
    __ fld(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here.  It will be loaded with the JNIEnv* later.
  for (int i = 1; i < Argument::n_int_register_parameters_c; i++) {
    const Register rm = g_INTArgReg[i];
    __ ld(rm, Address(sp, i * wordSize));
  }

  __ addi(sp, sp, 18 * wordSize);
  __ ret();

  return entry;
}

// Various method entries
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // xmethod: Method*
  // x19_sender_sp: sender sp
  // esp: args

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack:
  //        [ arg ] <-- esp
  //        [ arg ]
  // retaddr in ra

  address fn = nullptr;
  address entry_point = nullptr;
  switch (kind) {
    case Interpreter::java_lang_math_abs:
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ fabs_d(f10, f10);
      __ mv(sp, x19_sender_sp); // Restore caller's SP
      break;
    case Interpreter::java_lang_math_sqrt:
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ fsqrt_d(f10, f10);
      __ mv(sp, x19_sender_sp);
      break;
    case Interpreter::java_lang_math_sin :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dsin() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
      }
      __ call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_cos :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dcos() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
      }
      __ call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_tan :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dtan() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
      }
      __ call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_log :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dlog() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
      }
      __ call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_log10 :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dlog10() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
      }
      __ call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_exp :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dexp() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
      }
      __ call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_pow :
      entry_point = __ pc();
      __ mv(x9, ra);
      __ fld(f10, Address(esp, 2 * Interpreter::stackElementSize));
      __ fld(f11, Address(esp));
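      // f10 = base (pushed first, so deeper on the stack; a double takes
      // two stack slots), f11 = exponent at TOS.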
      __ mv(sp, x19_sender_sp);
      if (StubRoutines::dpow() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
      }
      __ call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_fmaD :
      if (UseFMA) {
        entry_point = __ pc();
        __ fld(f10, Address(esp, 4 * Interpreter::stackElementSize));
        __ fld(f11, Address(esp, 2 * Interpreter::stackElementSize));
        __ fld(f12, Address(esp));
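        // f10/f11/f12 = a/b/c of fma(a, b, c), with a deepest on the stack;
        // fmadd_d computes a * b + c with a single rounding.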
        __ fmadd_d(f10, f10, f11, f12);
        __ mv(sp, x19_sender_sp); // Restore caller's SP
      }
      break;
    case Interpreter::java_lang_math_fmaF :
      if (UseFMA) {
        entry_point = __ pc();
        __ flw(f10, Address(esp, 2 * Interpreter::stackElementSize));
        __ flw(f11, Address(esp, Interpreter::stackElementSize));
        __ flw(f12, Address(esp));
        __ fmadd_s(f10, f10, f11, f12);
        __ mv(sp, x19_sender_sp); // Restore caller's SP
      }
      break;
    default:
      ;
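      // Unhandled kinds leave entry_point as nullptr; the interpreter
      // then falls back to the normal entry for such methods.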
  }
  if (entry_point != nullptr) {
    __ ret();
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  // xmethod: Method*
  // x19_sender_sp: sender SP

  address entry_point = __ pc();

  // abstract method entry

  //  pop return address, reset last_sp to null
  __ empty_expression_stack();
  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
                                     xmethod);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ld(t0, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ shadd(t0, t0, fp, t0, LogBytesPerWord);
    // maximal sp for current fp (stack grows negative)
    // check if frame is complete
    __ bge(t0, sp, L);
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters

  // convention: expect aberrant index in register x11
  __ zero_extend(c_rarg2, x11, 32);
  // convention: expect array in register x13
  __ mv(c_rarg1, x13);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop_reg(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
  const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == nullptr, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop_reg(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ la(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(x10, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // Kind of lame: ExternalAddress can't take null because
    // external_word_Relocation will assert.
    if (message != nullptr) {
      __ la(c_rarg2, Address((address)message));
    } else {
      __ mv(c_rarg2, NULL_WORD);
    }
    __ call_VM(x10,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ j(RuntimeAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // and null it as marker that esp is now tos until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);

  if (state == atos) {
    Register obj = x10;
    Register mdp = x11;
    Register tmp = x12;
    __ ld(mdp, Address(xmethod, Method::method_data_offset()));
    __ profile_return_type(mdp, obj, tmp);
  }

  const Register cache = x11;
  const Register index = x12;

  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
    __ shadd(esp, cache, esp, t0, 3);
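    // The shadd above pops the invokedynamic parameters:
    // esp += num_parameters * wordSize.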
  } else {
    // Pop N words from the stack
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));

    __ shadd(esp, cache, esp, t0, 3);
  }

  // Restore machine SP
  __ restore_sp_after_call();

  __ check_and_handle_popframe(xthread);
  __ check_and_handle_earlyret(xthread);

  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step,
                                                               address continuation) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);
  __ get_dispatch();

  __ restore_sp_after_call();  // Restore SP to extended SP

  // Restore expression stack pointer
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // null last_sp until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  // handle exceptions
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  if (type == T_OBJECT) {
    // retrieve result from frame
    __ ld(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // and verify it
    __ verify_oop(x10);
  } else {
    __ cast_primitive_type(type, x10);
  }

  __ ret();                                  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
                                                                address runtime_entry) {
  assert_cond(runtime_entry != nullptr);
  address entry = __ pc();
  __ push(state);
  __ push_cont_fastpath(xthread);
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath(xthread);
  __ membar(MacroAssembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
  if (!Continuations::enabled()) return nullptr;
  address start = __ pc();

  __ restore_bcp();
  __ restore_locals();

  // Restore constant pool cache
  __ ld(xcpool, Address(fp, frame::interpreter_frame_cache_offset * wordSize));

  // Restore Java expression stack pointer
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, Interpreter::logStackElementSize);
  // and null it as marker that esp is now tos until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  // Restore machine SP
  __ ld(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ shadd(sp, t0, fp, t0, LogBytesPerWord);

  // Restore method
  __ ld(xmethod, Address(fp, frame::interpreter_frame_method_offset * wordSize));

  // Restore dispatch
  __ la(xdispatch, ExternalAddress((address)Interpreter::dispatch_table()));

  __ ret();

  return start;
}


// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// xmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ ld(x10, Address(xmethod, Method::method_data_offset()));
    __ beqz(x10, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(x10, in_bytes(MethodData::invocation_counter_offset()) +
                                         in_bytes(InvocationCounter::counter_offset()));
    const Address mask(x10, in_bytes(MethodData::invoke_mask_offset()));
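    // increment_mask_and_jump bumps the counter and branches to *overflow
    // when the masked counter value becomes zero.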
    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, t0, t1, false, overflow);
    __ j(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(t1,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  __ get_method_counters(xmethod, t1, done);
  const Address mask(t1, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, increment, mask, t0, x11, false, overflow);
  __ bind(done);
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  __ mv(c_rarg1, zr);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), c_rarg1);
  __ j(do_continue);
}

// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are always pushed (this wasn't obvious in
// generate_method_entry), so the guard should work for them too.
//
// Args:
//      x13: number of additional locals this frame needs (what we must check)
//      xmethod: Method*
//
// Kills:
//      x10
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_riscv.hpp
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  // total overhead size: entry_size + (saved fp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = (int)os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ mv(t0, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ bleu(x13, t0, after_frame_check);

  // compute sp as if this were going to be the last frame on
  // the stack before the red zone

  // locals + overhead, in bytes
  __ mv(x10, overhead_size);
  __ shadd(x10, x13, x10, t0, Interpreter::logStackElementSize);  // 2 slots per parameter.

  const Address stack_limit(xthread, JavaThread::stack_overflow_limit_offset());
  __ ld(t0, stack_limit);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack limit is non-zero.
  __ bnez(t0, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add stack limit to locals.
  __ add(x10, x10, t0);

  // Check against the current stack bottom.
  __ bgtu(sp, x10, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller.  This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind.  The ANDI should be
  // unnecessary because the sender SP in x19 is always aligned, but
  // it doesn't hurt.
  __ andi(sp, x19_sender_sp, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
  __ far_jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      xmethod: Method*
//      xlocals: locals
//
// Kills:
//      x10
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      t0, t1 (temporary regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method doesn't need synchronization", false);
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ lwu(x10, access_flags);
    __ andi(t0, x10, JVM_ACC_STATIC);
    // get receiver (assume this is the frequent case)
    __ ld(x10, Address(xlocals, Interpreter::local_offset_in_bytes(0)));
    __ beqz(t0, done);
    __ load_mirror(x10, xmethod, x15, t1);

#ifdef ASSERT
    {
      Label L;
      __ bnez(x10, L);
      __ stop("synchronization object is null");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ check_extended_sp();
  __ add(sp, sp, -entry_size); // add space for a monitor entry
  __ add(esp, esp, -entry_size);
  __ sub(t0, sp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ sub(t0, esp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, monitor_block_top);  // set new monitor block top
  // store object
  __ sd(x10, Address(esp, BasicObjectLock::obj_offset()));
  __ mv(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. The setup is identical for
// interpreted methods and for native methods, hence the shared code.
//
// Args:
//      ra: return address
//      xmethod: Method*
//      xlocals: pointer to locals
//      xcpool: cp cache
//      stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
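  // Slot assignments below, relative to the adjusted sp (which becomes
  // fp - 12 words):
  //   sp + 11: ra                    sp + 5: extended SP (relativized)
  //   sp + 10: saved fp              sp + 4: mirror
  //   sp +  9: sender sp             sp + 3: cp cache
  //   sp +  8: last_sp (null)        sp + 2: locals (relativized)
  //   sp +  7: Method*               sp + 1: bcp (null for natives)
  //   sp +  6: mdp (or zero)         sp + 0: expression stack bottom marker
  // Native frames additionally reserve two zero-initialized slots
  // (14 words in total).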
  if (native_call) {
    __ add(esp, sp, -14 * wordSize);
    __ mv(xbcp, zr);
    __ add(sp, sp, -14 * wordSize);
    // add 2 zero-initialized slots for native calls
    __ sd(zr, Address(sp, 13 * wordSize));
    __ sd(zr, Address(sp, 12 * wordSize));
  } else {
    __ add(esp, sp, -12 * wordSize);
    __ ld(t0, Address(xmethod, Method::const_offset()));     // get ConstMethod
    __ add(xbcp, t0, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ add(sp, sp, -12 * wordSize);
  }
  __ sd(xbcp, Address(sp, wordSize));
  __ mv(t0, frame::interpreter_frame_initial_sp_offset);
  __ sd(t0, Address(sp, 0));

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ld(t0, Address(xmethod, Method::method_data_offset()));
    __ beqz(t0, method_data_continue);
    __ la(t0, Address(t0, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
  }

  __ sd(xmethod, Address(sp, 7 * wordSize));
  __ sd(ProfileInterpreter ? t0 : zr, Address(sp, 6 * wordSize));

  __ sd(ra, Address(sp, 11 * wordSize));
  __ sd(fp, Address(sp, 10 * wordSize));
  __ la(fp, Address(sp, 12 * wordSize)); // include ra & fp

  __ ld(xcpool, Address(xmethod, Method::const_offset()));
  __ ld(xcpool, Address(xcpool, ConstMethod::constants_offset()));
  __ ld(xcpool, Address(xcpool, ConstantPool::cache_offset()));
  __ sd(xcpool, Address(sp, 3 * wordSize));
  __ sub(t0, xlocals, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);   // t0 = (xlocals - fp) in stack-element units
  // Store relativized xlocals, see frame::interpreter_frame_locals().
  __ sd(t0, Address(sp, 2 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ sd(x19_sender_sp, Address(sp, 9 * wordSize));
  __ sd(zr, Address(sp, 8 * wordSize));

  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(t2, xmethod, x15, t1);
  __ sd(t2, Address(sp, 4 * wordSize));

  if (!native_call) {
    __ ld(t0, Address(xmethod, Method::const_offset()));
    __ lhu(t0, Address(t0, ConstMethod::max_stack_offset()));
    __ add(t0, t0, MAX2(3, Method::extra_stack_entries()));
    __ slli(t0, t0, 3);
    __ sub(t0, sp, t0);
    __ andi(t0, t0, -16);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    // Store extended SP
    __ sd(t1, Address(sp, 5 * wordSize));
    // Move SP out of the way
    __ mv(sp, t0);
  } else {
    // Make sure there is room for the exception oop pushed in case method throws
    // an exception (see TemplateInterpreterGenerator::generate_throw_exception())
    __ sub(t0, sp, 2 * wordSize);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    __ sd(t1, Address(sp, 5 * wordSize));
    __ mv(sp, t0);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // xmethod: Method*
  // x19_sender_sp: senderSP must be preserved for slow path, set SP to it on fast path

  // ra is live.  It must be saved around calls.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  const Register local_0 = c_rarg0;
  // Check whether local 0 (the receiver) is null; if so, it is OK to
  // jump to the slow path, which will generate the NPE.
  __ ld(local_0, Address(esp, 0));
  __ beqz(local_0, slow_path);

  // Load the value of the referent field.
  const Address field_address(local_0, referent_offset);
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->load_at(_masm, IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ t0, /*tmp2*/ t1);

  // areturn
  __ andi(sp, x19_sender_sp, -16);  // done with stack
  __ ret();

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  // TODO: Unimplemented generate_CRC32_update_entry
  return nullptr;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32_updateBytes_entry
  return nullptr;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native.
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32C_updateBytes_entry
  return nullptr;
}

// Not supported
address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // See more discussion in stackOverflow.hpp.

  const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
  const int page_size = (int)os::vm_page_size();
  const int n_shadow_pages = shadow_zone_size / page_size;

#ifdef ASSERT
  Label L_good_limit;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bnez(t0, L_good_limit);
  __ stop("shadow zone safe limit is not initialized");
  __ bind(L_good_limit);

  Label L_good_watermark;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bnez(t0, L_good_watermark);
  __ stop("shadow zone growth watermark is not initialized");
  __ bind(L_good_watermark);
#endif

  Label L_done;

  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bgtu(sp, t0, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p * page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bleu(sp, t0, L_done);
  __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // x11: Method*
  // x30: sender sp

  address entry_point = __ pc();

  const Address constMethod       (xmethod, Method::const_offset());
  const Address access_flags      (xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x12, ConstMethod::
                                   size_of_parameters_offset());

  // get parameter size (always needed)
  __ ld(x12, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack.

  // xmethod: Method*
  // x12: size of parameters
  // x30: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (xlocals)
  __ shadd(xlocals, x12, esp, xlocals, 3);
  __ addi(xlocals, xlocals, -wordSize);
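  // xlocals now points at the first (leftmost) parameter; the interpreter
  // addresses locals at decreasing offsets from xlocals.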

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andi(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute non-native method as native", false);
  __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
#endif
  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of a synchronized method
  // which has not been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(xthread,
                                              in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mv(t1, true);
  __ sb(t1, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ sb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    __ lwu(x10, access_flags);
    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
#endif
  }

  // start execution
#ifdef ASSERT
  __ verify_frame_setup();
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = x18;
  const Register result_handler = x19;

  // allocate space for parameters
  __ ld(t, Address(xmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ slli(t, t, Interpreter::logStackElementSize);
  __ sub(x30, esp, t);
  __ andi(sp, x30, -16);
  __ mv(esp, x30);

  // get signature handler
  {
    Label L;
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bnez(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == xlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t0,
         "adjust this code");

  // The generated handlers do not touch xmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ jalr(t);
  __ get_method(xmethod);        // slow path can do a GC, reload xmethod

  // result handler is in x10
  // set result handler
  __ mv(result_handler, x10);
  // Save it in the frame in case of preemption; we cannot rely on callee saved registers.
  __ sd(x10, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));

  // pass mirror handle if static call
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_STATIC));
    __ beqz(t0, L);
    // get mirror
    __ load_mirror(t, xmethod, x28, t1);
    // copy mirror into activation frame
    __ sd(t, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ addi(c_rarg1, fp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in x28
  {
    Label L;
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ la(t, unsatisfied);
    __ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8

    __ bne(x28, t1, L);
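    // Fall through: the entry is still the UnsatisfiedLinkError sentinel,
    // so ask the runtime to look up and bind the real native function.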
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ get_method(xmethod);
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, xthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  // For convenience we use the pc we want to resume to in
  // case of preemption on Object.wait.
  Label native_return;
  __ set_last_Java_frame(esp, fp, native_return, x30);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ lwu(t, Address(xthread, JavaThread::thread_state_offset()));
    __ addi(t0, zr, (u1)_thread_in_Java);
    __ beq(t, t0, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
  __ mv(t0, _thread_in_native);
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
  __ sw(t0, Address(t1));

  __ push_cont_fastpath();

  // Call the native method.
  __ jalr(x28);

  __ pop_cont_fastpath();

  __ get_method(xmethod);
  // result potentially in x10 or f10

  // Restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(t0);

  // make room for the pushes we're about to do
  __ sub(t0, esp, 4 * wordSize);
  __ andi(sp, t0, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack, then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_native_trans);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ membar(MacroAssembler::AnyAny);
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label L, Continue;

    // We need an acquire here to ensure that any subsequent load of the
    // global SafepointSynchronize::_state flag is ordered after this load
    // of the thread-local polling word. We don't want this poll to
    // return false (i.e. not safepointing) and a later poll of the global
    // SafepointSynchronize::_state spuriously to return true.
    //
    // This is to avoid a race when we're in a native->Java transition
    // racing the code which wakes up from a safepoint.
    __ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */);
    __ lwu(t1, Address(xthread, JavaThread::suspend_flags_offset()));
    __ beqz(t1, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it, and never return here, preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mv(c_rarg0, xthread);
    __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ get_method(xmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_Java);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  if (LockingMode != LM_LEGACY) {
    // Check preemption for Object.wait()
    Label not_preempted;
    __ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
    __ beqz(t1, not_preempted);
    __ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
    __ jr(t1);
    __ bind(native_return);
    __ restore_after_resume(true /* is_native */);
    // reload result_handler
    __ ld(result_handler, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));
    __ bind(not_preempted);
  } else {
    // Any pc will do, so just use this one for LM_LEGACY to keep code together.
    __ bind(native_return);
  }

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ld(t, Address(xthread, JavaThread::active_handles_offset()));
  __ sd(zr, Address(t, JNIHandleBlock::top_offset()));
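  // Zeroing the handle block's top releases all JNI local references
  // created during this native call in one step.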

  // If the result is an oop, unbox it and store it in the frame where the
  // GC will see it and the result handler will pick it up

  {
    Label no_oop;
    __ la(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ bne(t, result_handler, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
    __ pop(ltos);
    __ resolve_jobject(x10, t, t1);
    __ sd(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lwu(t0, Address(xthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ addi(t1, zr, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
    __ bne(t0, t1, no_reguard);

    __ push_call_clobbered_registers();
    __ mv(c_rarg0, xthread);
    __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ pop_call_clobbered_registers();
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here.  Also, we can't call_VM until the bcp has been restored.
  // We need bcp for throwing the exception below, so get it now.
  __ get_method(xmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // xbcp == code_base()
  __ ld(xbcp, Address(xmethod, Method::const_offset()));   // get ConstMethod*
  __ add(xbcp, xbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the sp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_SYNCHRONIZED));
    __ beqz(t0, L);
    // the code below should be shared with the interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, we need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // the monitor is expected in c_rarg1 for the slow unlock path
      __ la(c_rarg1, Address(fp,   // address of first monitor
                             (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                        wordSize - sizeof(BasicObjectLock))));

      __ ld(t, Address(c_rarg1, BasicObjectLock::obj_offset()));
      __ bnez(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  __ pop(ltos);
  __ pop(dtos);

  __ jalr(result_handler);

  // remove activation
  __ ld(esp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mv(sp, esp);

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}
1410 
1411 //
1412 // Generic interpreted method entry to (asm) interpreter
1413 //
1414 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
1415 
1416   // determine code generation flags
1417   const bool inc_counter  = UseCompiler || CountCompiledCalls;
1418 
1419   // t0: sender sp
1420   address entry_point = __ pc();
1421 
1422   const Address constMethod(xmethod, Method::const_offset());
1423   const Address access_flags(xmethod, Method::access_flags_offset());
1424   const Address size_of_parameters(x13,
1425                                    ConstMethod::size_of_parameters_offset());
1426   const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());
1427 
1428   // get parameter size (always needed)
1429   // need to load the const method first
1430   __ ld(x13, constMethod);
1431   __ load_unsigned_short(x12, size_of_parameters);
1432 
1433   // x12: size of parameters
1434 
1435   __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
1436   __ sub(x13, x13, x12); // x13 = no. of additional locals
1437 
1438   // see if we've got enough room on the stack for locals plus overhead.
1439   generate_stack_overflow_check();
1440 
1441   // compute beginning of parameters (xlocals)
1442   __ shadd(xlocals, x12, esp, t1, 3);
1443   __ add(xlocals, xlocals, -wordSize);
1444 
1445   // Make room for additional locals
1446   __ slli(t1, x13, 3);
1447   __ sub(t0, esp, t1);
1448 
1449   // Padding between locals and fixed part of activation frame to ensure
1450   // SP is always 16-byte aligned.
1451   __ andi(sp, t0, -16);
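  // A sketch of the address arithmetic above, using the illustrative
  // names nparams (x12) and nextra (x13):
  //   xlocals = esp + nparams * wordSize - wordSize  // slot of parameter 0
  //   t0      = esp - nextra * wordSize              // lowest new local slot
  //   sp      = t0 & ~0xf                            // 16-byte aligned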

  // x13 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
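  // Roughly: while (nextra-- > 0) { *cursor++ = 0; } with cursor starting
  // at t0; local slots are zeroed so the frame never exposes stale values
  // (e.g. to a GC walking the frame) before the bytecodes initialize them.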
  {
    Label exit, loop;
    __ blez(x13, exit); // do nothing if x13 <= 0
    __ bind(loop);
    __ sd(zr, Address(t0));
    __ add(t0, t0, wordSize);
    __ add(x13, x13, -1); // until everything initialized
    __ bnez(x13, loop);
    __ bind(exit);
  }

  // Load the base of the dispatch table
  __ get_dispatch();

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ lwu(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute native method as non-native");
  __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of a synchronized method
  // which has not been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. remove_activation()
  // will check this flag.

  const Address do_not_unlock_if_synchronized(xthread,
                                              in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mv(t1, true);
  __ sb(t1, do_not_unlock_if_synchronized);

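  // If the method has a MethodData* (profiling data), point mdp at its
  // data area and profile the parameter types; otherwise skip profiling.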
  Label no_mdp;
  const Register mdp = x13;
  __ ld(mdp, Address(xmethod, Method::method_data_offset()));
  __ beqz(mdp, no_mdp);
  __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
  __ profile_parameters_type(mdp, x11, x12, x14); // use x11, x12, x14 as tmp registers
  __ bind(no_mdp);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ sb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER the invocation counter check and the stack overflow
  // check, so the method is not locked if either of those triggers.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    __ lwu(x10, access_flags);
    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
#endif
  }

  // start execution
#ifdef ASSERT
  __ verify_frame_setup();
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Method entry for java.lang.Thread.currentThread
address TemplateInterpreterGenerator::generate_currentThread() {
  address entry_point = __ pc();

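  // The stub behaves roughly like `return Thread.currentThread();` without
  // building an interpreter frame: load the JavaThread's _vthread OopHandle
  // and resolve it to the java.lang.Thread oop expected by the caller.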
  __ ld(x10, Address(xthread, JavaThread::vthread_offset()));
  __ resolve_oop_handle(x10, t0, t1);
  __ ret();

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  // x10: exception
  // x13: return address/pc that threw exception
  __ restore_bcp();    // xbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase();  // restore xheapbase as heapbase.
  __ get_dispatch();

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, xmethod may be corrupt.
  __ get_method(xmethod);
  // expression stack is undefined here
  // x10: exception
  // xbcp: exception bcp
  __ verify_oop(x10);
  __ mv(c_rarg1, x10);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(x13,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);

  // Restore machine SP
  __ restore_sp_after_call();

  // x10: exception handler entry point
  // x13: preserved exception oop
  // xbcp: bcp for exception handler
  __ push_ptr(x13); // push exception which is now the only value on the stack
  __ jr(x10); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ lwu(x13, Address(xthread, JavaThread::popframe_condition_offset()));
  __ ori(x13, x13, JavaThread::popframe_processing_bit);
  __ sw(x13, Address(xthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ld(c_rarg1, Address(fp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), c_rarg1);
    __ bnez(x10, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(x10);
    __ ld(x10, Address(x10, Method::const_offset()));
    __ load_unsigned_short(x10, Address(x10, in_bytes(ConstMethod::
                                                      size_of_parameters_offset())));
    __ slli(x10, x10, Interpreter::logStackElementSize);
    __ restore_locals();
    __ sub(xlocals, xlocals, x10);
    __ add(xlocals, xlocals, wordSize);
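    // x10 now holds the argument area size in bytes and xlocals points at
    // its lowest slot (locals grow toward lower addresses), so the range
    // [xlocals, xlocals + x10) covers the incoming arguments to preserve.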
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          xthread, x10, xlocals);

    __ remove_activation(vtos,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ mv(t0, JavaThread::popframe_force_deopt_reexecution_bit);
    __ sw(t0, Address(xthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret();

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Restore the last_sp (saved as a word offset from fp) and null it out
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp,  t0,  LogBytesPerWord);
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);
  __ get_dispatch();

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ sw(zr, Address(xthread, JavaThread::popframe_condition_offset()));
  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ lbu(t0, Address(xbcp, 0));
    __ mv(t1, Bytecodes::_invokestatic);
    __ bne(t1, t0, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name argument, or null.

    __ ld(c_rarg0, Address(xlocals, 0));
    __ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, xmethod, xbcp);

    __ beqz(x10, L_done);

    __ sd(x10, Address(esp, 0));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Restore machine SP
  __ restore_sp_after_call();

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(x10);
  __ sd(x10, Address(xthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ true,
                       /* notify_jvmdi */ false);
  // restore exception
  __ get_vm_result(x10, xthread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // x10: exception
  // ra: return address/pc that threw exception
  // sp: expression stack of caller
  // fp: fp of caller
  // FIXME: There's no point saving ra here because VM calls don't trash it
  __ sub(sp, sp, 2 * wordSize);
  __ sd(x10, Address(sp, 0));                   // save exception
  __ sd(ra, Address(sp, wordSize));             // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                         SharedRuntime::exception_handler_for_return_address),
                        xthread, ra);
  __ mv(x11, x10);                              // save exception handler
  __ ld(x10, Address(sp, 0));                   // restore exception
  __ ld(ra, Address(sp, wordSize));             // restore return address
  __ add(sp, sp, 2 * wordSize);
  // We might be returning to a deopt handler that expects x13 to
  // contain the exception pc
  __ mv(x13, ra);
  // Note that an "issuing PC" is actually the next PC after the call
  __ jr(x11);                                   // jump to exception
                                                // handler of caller
}

//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(t0, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
  __ sd(zr, cond_addr);

  __ remove_activation(state,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ ret();

  return entry;
}
// end of ForceEarlyReturn support

//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t != nullptr && t->is_valid() && t->tos_in() == vtos, "illegal template");
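  // Each non-vtos entry point pushes the value cached in its tos
  // register(s) onto the expression stack and falls through to the common
  // vtos entry, so a vtos template can be entered with any tos in-state.
  // Roughly:  atos: push_ptr(); goto L;  ftos: push_f(); goto L;  ...
  //           vtos: bind(L); generate_and_dispatch(t);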
  Label L;
  aep = __ pc();     // atos entry point
      __ push_ptr();
      __ j(L);
  fep = __ pc();     // ftos entry point
      __ push_f();
      __ j(L);
  dep = __ pc();     // dtos entry point
      __ push_d();
      __ j(L);
  lep = __ pc();     // ltos entry point
      __ push_l();
      __ j(L);
  bep = cep = sep = iep = __ pc();     // [bcsi]tos entry point
      __ push_i();
  vep = __ pc();     // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

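// Bytecode counting/histogram helpers (enabled by develop flags such as
// CountBytecodes and PrintBytecodeHistogram); the counters are bumped with
// an atomic AMO add so concurrent interpreter threads do not lose updates.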
void TemplateInterpreterGenerator::count_bytecode() {
  __ mv(x7, (address) &BytecodeCounter::_counter_value);
  __ atomic_addw(noreg, 1, x7);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
  __ atomic_addw(noreg, 1, x7);
}

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push_reg(ra);
  __ push(state);
  __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ mv(c_rarg2, x10);  // Pass itos
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
  __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
  __ pop(state);
  __ pop_reg(ra);
  __ ret();                                   // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Calculate new index for counter:
  //   _index = (_index >> log2_number_of_codes) |
  //            (bytecode << log2_number_of_codes);
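  // (after the update the high bits hold the current bytecode and the low
  //  bits the previous one, so _index identifies the bytecode pair)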
  Register index_addr = t1;
  Register index = t0;
  __ mv(index_addr, (address) &BytecodePairHistogram::_index);
  __ lw(index, index_addr);
  __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
  __ orrw(index, x7, index);
  __ sw(index, index_addr);
  // Bump bucket contents:
  //   _counters[_index] ++;
  Register counter_addr = t1;
  __ mv(x7, (address) &BytecodePairHistogram::_counters);
  __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
  __ atomic_addw(noreg, 1, counter_addr);
}

void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated");
  __ rt_call(Interpreter::trace_code(t->tos_in()));
  __ reinit_heapbase();
}

void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ push_reg(t0);
  __ mv(t0, (address) &BytecodeCounter::_counter_value);
  __ ld(t0, Address(t0));
  __ mv(t1, StopInterpreterAt);
  __ bne(t0, t1, L);
  __ ebreak();
  __ bind(L);
  __ pop_reg(t0);
}

#endif // !PRODUCT