/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/powerOfTwo.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT
// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with -XX:+PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI.
int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andi(esp, esp, -16);
  __ mv(c_rarg3, esp);
  // xmethod
  // xlocals
  // c_rarg3: first stack arg - wordSize
  // adjust sp

  __ subi(sp, c_rarg3, 18 * wordSize);
  __ subi(sp, sp, 2 * wordSize);
  __ sd(ra, Address(sp, 0));

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             xmethod, xlocals, c_rarg3);

  // x10: result handler

  // Stack layout:
  // sp: return address           <- sp
  //      1 garbage
  //      8 integer args (if static first is unused)
  //      1 float/double identifiers
  //      8 double args
  //        stack args              <- esp
  //        garbage
  //        expression stack bottom
  //        bcp (null)
  //        ...

  // Restore ra
  __ ld(ra, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize);

  // Do FP first so we can use c_rarg3 as temp
  __ lwu(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

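  // The identifier word loaded above is a bitmask produced by the
  // slow_signature_handler call: bit i describes the i-th FP argument.
  // A set bit selects a 64-bit load (fld, double), a clear bit a 32-bit
  // load (flw, float), so each FP argument register gets its value at
  // the correct width.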
  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = g_FPArgReg[i];
    Label d, done;

    __ test_bit(t0, c_rarg3, i);
    __ bnez(t0, d);
    __ flw(r, Address(sp, (10 + i) * wordSize));
    __ j(done);
    __ bind(d);
    __ fld(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here.  It will be loaded with the JNIEnv* later.
  for (int i = 1; i < Argument::n_int_register_parameters_c; i++) {
    const Register rm = g_INTArgReg[i];
    __ ld(rm, Address(sp, i * wordSize));
  }

  __ addi(sp, sp, 18 * wordSize);
  __ ret();

  return entry;
}

// Various method entries
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // xmethod: Method*
  // x19_sender_sp: sender sp
  // esp: args

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack:
  //        [ arg ] <-- esp
  //        [ arg ]
  // retaddr in ra

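  // Each case below follows the same pattern: load the argument(s) from
  // the expression stack, restore the caller's SP from x19_sender_sp, and
  // either compute the result inline (abs, sqrt, fma) or call a stub or
  // SharedRuntime fallback. ra is saved in x9 (a callee-saved register)
  // across rt_call because the callee may clobber it; the final ret then
  // returns straight to the caller.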
  address fn = nullptr;
  address entry_point = nullptr;
  switch (kind) {
    case Interpreter::java_lang_math_abs:
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ fabs_d(f10, f10);
      __ mv(sp, x19_sender_sp); // Restore caller's SP
      break;
    case Interpreter::java_lang_math_sqrt:
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ fsqrt_d(f10, f10);
      __ mv(sp, x19_sender_sp);
      break;
    case Interpreter::java_lang_math_sin :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dsin() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
      }
      __ rt_call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_cos :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dcos() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
      }
      __ rt_call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_tan :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dtan() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
      }
      __ rt_call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_log :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dlog() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
      }
      __ rt_call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_log10 :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dlog10() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
      }
      __ rt_call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_exp :
      entry_point = __ pc();
      __ fld(f10, Address(esp));
      __ mv(sp, x19_sender_sp);
      __ mv(x9, ra);
      if (StubRoutines::dexp() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
      }
      __ rt_call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_pow :
      entry_point = __ pc();
      __ mv(x9, ra);
      __ fld(f10, Address(esp, 2 * Interpreter::stackElementSize));
      __ fld(f11, Address(esp));
      __ mv(sp, x19_sender_sp);
      if (StubRoutines::dpow() == nullptr) {
        fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      } else {
        fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
      }
      __ rt_call(fn);
      __ mv(ra, x9);
      break;
    case Interpreter::java_lang_math_fmaD :
      if (UseFMA) {
        entry_point = __ pc();
        __ fld(f10, Address(esp, 4 * Interpreter::stackElementSize));
        __ fld(f11, Address(esp, 2 * Interpreter::stackElementSize));
        __ fld(f12, Address(esp));
        __ fmadd_d(f10, f10, f11, f12);
        __ mv(sp, x19_sender_sp); // Restore caller's SP
      }
      break;
    case Interpreter::java_lang_math_fmaF :
      if (UseFMA) {
        entry_point = __ pc();
        __ flw(f10, Address(esp, 2 * Interpreter::stackElementSize));
        __ flw(f11, Address(esp, Interpreter::stackElementSize));
        __ flw(f12, Address(esp));
        __ fmadd_s(f10, f10, f11, f12);
        __ mv(sp, x19_sender_sp); // Restore caller's SP
      }
      break;
    default:
      ;
  }
  if (entry_point != nullptr) {
    __ ret();
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  // xmethod: Method*
  // x19_sender_sp: sender SP

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to null
  __ empty_expression_stack();
  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
                                     xmethod);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ld(t0, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ shadd(t0, t0, fp, t0, LogBytesPerWord);
    // maximal sp for current fp (stack grows negative)
    // check if frame is complete
    __ bge(t0, sp, L);
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters

  // convention: expect aberrant index in register x11
  __ zext(c_rarg2, x11, 32);
  // convention: expect array in register x13
  __ mv(c_rarg1, x13);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop_reg(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
  const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == nullptr, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop_reg(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ la(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(x10, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // Kind of lame: ExternalAddress can't take null because
    // external_word_Relocation will assert.
    if (message != nullptr) {
      __ la(c_rarg2, Address((address)message));
    } else {
      __ mv(c_rarg2, NULL_WORD);
    }
    __ call_VM(x10,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ j(RuntimeAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // and null it as marker that esp is now tos until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);

  if (state == atos) {
    Register obj = x10;
    Register mdp = x11;
    Register tmp = x12;
    __ ld(mdp, Address(xmethod, Method::method_data_offset()));
    __ profile_return_type(mdp, obj, tmp);
  }

  const Register cache = x11;
  const Register index = x12;

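  // invokedynamic call sites carry a 4-byte index and are resolved via a
  // ResolvedIndyEntry; all other invokes carry a 2-byte index and use a
  // ResolvedMethodEntry. In both cases the entry's parameter count tells
  // us how many stack slots the call consumed, so the callee's arguments
  // can be popped off the expression stack here.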
  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
    __ shadd(esp, cache, esp, t0, 3);
  } else {
    // Pop N words from the stack
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));

    __ shadd(esp, cache, esp, t0, 3);
  }

  // Restore machine SP
  __ restore_sp_after_call();

  __ check_and_handle_popframe(xthread);
  __ check_and_handle_earlyret(xthread);

  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step,
                                                               address continuation) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(xmethod);
  __ get_dispatch();

  __ restore_sp_after_call();  // Restore SP to extended SP

  // Restore expression stack pointer
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
  // null last_sp until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  // handle exceptions
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  if (type == T_OBJECT) {
    // retrieve result from frame
    __ ld(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // and verify it
    __ verify_oop(x10);
  } else {
    __ cast_primitive_type(type, x10);
  }

  __ ret();                                  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
                                                                address runtime_entry) {
  assert_cond(runtime_entry != nullptr);
  address entry = __ pc();
  __ push(state);
  __ push_cont_fastpath(xthread);
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath(xthread);
  __ membar(MacroAssembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
  if (!Continuations::enabled()) return nullptr;
  address start = __ pc();

  __ restore_bcp();
  __ restore_locals();

  // Restore constant pool cache
  __ ld(xcpool, Address(fp, frame::interpreter_frame_cache_offset * wordSize));

  // Restore Java expression stack pointer
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, Interpreter::logStackElementSize);
  // and null it as marker that esp is now tos until next java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  // Restore machine SP
  __ ld(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ shadd(sp, t0, fp, t0, LogBytesPerWord);

  // Restore method
  __ ld(xmethod, Address(fp, frame::interpreter_frame_method_offset * wordSize));

  // Restore dispatch
  __ la(xdispatch, ExternalAddress((address)Interpreter::dispatch_table()));

  __ ret();

  return start;
}


// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// xmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ ld(x10, Address(xmethod, Method::method_data_offset()));
    __ beqz(x10, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(x10, in_bytes(MethodData::invocation_counter_offset()) +
                                         in_bytes(InvocationCounter::counter_offset()));
    const Address mask(x10, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, t0, t1, false, overflow);
    __ j(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(t1,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  __ get_method_counters(xmethod, t1, done);
  const Address mask(t1, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, increment, mask, t0, x11, false, overflow);
  __ bind(done);
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  __ mv(c_rarg1, zr);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), c_rarg1);
  __ j(do_continue);
}

// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this wasn't
// obvious in generate_method_entry), the guard should work for them
// too.
//
// Args:
//      x13: number of additional locals this frame needs (what we must check)
//      xmethod: Method*
//
// Kills:
//      x10
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_riscv.hpp
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  // total overhead size: entry_size + (saved fp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = (int)os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ mv(t0, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ bleu(x13, t0, after_frame_check);

  // compute sp as if this were going to be the last frame on
  // the stack before the red zone

  // locals + overhead, in bytes
  __ mv(x10, overhead_size);
  __ shadd(x10, x13, x10, t0, Interpreter::logStackElementSize);  // 2 slots per parameter.

  const Address stack_limit(xthread, JavaThread::stack_overflow_limit_offset());
  __ ld(t0, stack_limit);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack limit is non-zero.
  __ bnez(t0, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add stack limit to locals.
  __ add(x10, x10, t0);

  // Check against the current stack bottom.
  __ bgtu(sp, x10, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller.  This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind.  The ANDI should be
  // unnecessary because the sender SP in x19 is always aligned, but
  // it doesn't hurt.
  __ andi(sp, x19_sender_sp, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
  __ far_jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      xmethod: Method*
//      xlocals: locals
//
// Kills:
//      x10
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      t0, t1 (temporary regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(xmethod, Method::access_flags_offset());
  const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

#ifdef ASSERT
  __ load_unsigned_short(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method doesn't need synchronization", false);
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ load_unsigned_short(x10, access_flags);
    __ andi(t0, x10, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ld(x10, Address(xlocals, Interpreter::local_offset_in_bytes(0)));
    __ beqz(t0, done);
    __ load_mirror(x10, xmethod, x15, t1);

#ifdef ASSERT
    {
      Label L;
      __ bnez(x10, L);
      __ stop("synchronization object is null");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

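  // Note that the monitor block top and the extended SP are stored below
  // as offsets from fp in stack-element units rather than as raw
  // pointers. Keeping these frame fields relativized makes the frame
  // position-independent, which matters when a continuation copies
  // frames between the stack and the heap.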
  // add space for monitor & lock
  __ check_extended_sp();
  __ sub(sp, sp, entry_size); // add space for a monitor entry
  __ sub(esp, esp, entry_size);
  __ sub(t0, sp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ sub(t0, esp, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);
  __ sd(t0, monitor_block_top);  // set new monitor block top
  // store object
  __ sd(x10, Address(esp, BasicObjectLock::obj_offset()));
  __ mv(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      ra: return address
//      xmethod: Method*
//      xlocals: pointer to locals
//      xcpool: cp cache
//      stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Save ConstMethod* in x15_const_method for later use to avoid loading multiple times
  Register x15_const_method = x15;
  __ ld(x15_const_method, Address(xmethod, Method::const_offset()));

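  // A sketch of the layout the stores below build, derived from the
  // offsets used in this function (word offsets from the new sp; the
  // symbolic constants live in frame_riscv.hpp):
  //   0: monitor block top / initial esp (relativized)
  //   1: bcp                              2: locals (relativized)
  //   3: ConstantPoolCache*               4: mirror (kept as a GC root)
  //   5: extended SP (relativized)        6: mdp (or null)
  //   7: Method*                          8: last_sp (null)
  //   9: sender SP                       10: saved fp
  //  11: return address
  // Native frames get two extra zero-initialized words (slots 12 and
  // 13); these appear to back the oop temp and result handler slots that
  // generate_native_entry stores relative to fp.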
  // initialize fixed part of activation frame
  if (native_call) {
    __ subi(esp, sp, 14 * wordSize);
    __ mv(xbcp, zr);
    __ subi(sp, sp, 14 * wordSize);
    // add 2 zero-initialized slots for native calls
    __ sd(zr, Address(sp, 13 * wordSize));
    __ sd(zr, Address(sp, 12 * wordSize));
  } else {
    __ subi(esp, sp, 12 * wordSize);
    __ add(xbcp, x15_const_method, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ subi(sp, sp, 12 * wordSize);
  }
  __ sd(xbcp, Address(sp, wordSize));
  __ mv(t0, frame::interpreter_frame_initial_sp_offset);
  __ sd(t0, Address(sp, 0));

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ld(t0, Address(xmethod, Method::method_data_offset()));
    __ beqz(t0, method_data_continue);
    __ la(t0, Address(t0, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
  }

  __ sd(xmethod, Address(sp, 7 * wordSize));
  __ sd(ProfileInterpreter ? t0 : zr, Address(sp, 6 * wordSize));

  __ sd(ra, Address(sp, 11 * wordSize));
  __ sd(fp, Address(sp, 10 * wordSize));
  __ la(fp, Address(sp, 12 * wordSize)); // include ra & fp

  // Save ConstantPool* in x28_constants for later use to avoid loading multiple times
  Register x28_constants = x28;
  __ ld(x28_constants, Address(x15_const_method, ConstMethod::constants_offset()));
  __ ld(xcpool, Address(x28_constants, ConstantPool::cache_offset()));
  __ sd(xcpool, Address(sp, 3 * wordSize));
  __ sub(t0, xlocals, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);   // t0 = (xlocals - fp) >> logStackElementSize
  // Store relativized xlocals, see frame::interpreter_frame_locals().
  __ sd(t0, Address(sp, 2 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ sd(x19_sender_sp, Address(sp, 9 * wordSize));
  __ sd(zr, Address(sp, 8 * wordSize));

  // Get the mirror: resolve ConstantPool* -> InstanceKlass* -> Java mirror
  // and store it in the frame as a GC root for this Method*
  __ ld(t2, Address(x28_constants, ConstantPool::pool_holder_offset()));
  __ ld(t2, Address(t2, in_bytes(Klass::java_mirror_offset())));
  __ resolve_oop_handle(t2, t0, t1);
  __ sd(t2, Address(sp, 4 * wordSize));

  if (!native_call) {
    __ lhu(t0, Address(x15_const_method, ConstMethod::max_stack_offset()));
    __ add(t0, t0, MAX2(3, Method::extra_stack_entries()));
    __ slli(t0, t0, 3);
    __ sub(t0, sp, t0);
    __ andi(t0, t0, -16);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    // Store extended SP
    __ sd(t1, Address(sp, 5 * wordSize));
    // Move SP out of the way
    __ mv(sp, t0);
  } else {
    // Make sure there is room for the exception oop pushed in case method throws
    // an exception (see TemplateInterpreterGenerator::generate_throw_exception())
    __ subi(t0, sp, 2 * wordSize);
    __ sub(t1, t0, fp);
    __ srai(t1, t1, Interpreter::logStackElementSize);
    __ sd(t1, Address(sp, 5 * wordSize));
    __ mv(sp, t0);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // xmethod: Method*
  // x19_sender_sp: senderSP must preserve for slow path, set SP to it on fast path

  // ra is live.  It must be saved around calls.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  const Register local_0 = c_rarg0;
  // Check if local 0 isn't null
  // If the receiver is null then it is OK to jump to the slow path.
  __ ld(local_0, Address(esp, 0));
  __ beqz(local_0, slow_path);

  // Load the value of the referent field.
  const Address field_address(local_0, referent_offset);
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->load_at(_masm, IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ t0, /*tmp2*/ t1);

  // areturn
  __ andi(sp, x19_sender_sp, -16);  // done with stack
  __ ret();

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  // TODO: Unimplemented generate_CRC32_update_entry
  return nullptr;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32_updateBytes_entry
  return nullptr;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // TODO: Unimplemented generate_CRC32C_updateBytes_entry
  return nullptr;
}

// Not supported
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // See more discussion in stackOverflow.hpp.

  const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
  const int page_size = (int)os::vm_page_size();
  const int n_shadow_pages = shadow_zone_size / page_size;

#ifdef ASSERT
  Label L_good_limit;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bnez(t0, L_good_limit);
  __ stop("shadow zone safe limit is not initialized");
  __ bind(L_good_limit);

  Label L_good_watermark;
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bnez(t0, L_good_watermark);
  __ stop("shadow zone growth watermark is not initialized");
  __ bind(L_good_watermark);
#endif

  Label L_done;

  __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
  __ bgtu(sp, t0, L_done);

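  // Touch one word in each shadow-zone page, working downward from the
  // current SP, so that any stack overflow is triggered here, where it
  // can be handled, rather than at an arbitrary point inside a callee.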
  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p * page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
  __ bleu(sp, t0, L_done);
  __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // xmethod: Method*
  // x19_sender_sp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (xmethod, Method::const_offset());
  const Address access_flags      (xmethod, Method::access_flags_offset());
  const Address size_of_parameters(x12, ConstMethod::
                                   size_of_parameters_offset());

  // get parameter size (always needed)
  __ ld(x12, constMethod);
  __ load_unsigned_short(x12, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack.

  // xmethod: Method*
  // x12: size of parameters
  // x19_sender_sp: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (xlocals)
  __ shadd(xlocals, x12, esp, xlocals, 3);
  __ subi(xlocals, xlocals, wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andi(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ load_unsigned_short(x10, access_flags);
  __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute non-native method as native", false);
  __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(xthread,
                                              in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mv(t1, true);
  __ sb(t1, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ sb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    __ load_unsigned_short(x10, access_flags);
    __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
#endif
  }

  // start execution
#ifdef ASSERT
  __ verify_frame_setup();
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = x18;
  const Register result_handler = x19;

  // allocate space for parameters
  __ ld(t, Address(xmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ slli(t, t, Interpreter::logStackElementSize);
  __ sub(x30, esp, t);
  __ andi(sp, x30, -16);
  __ mv(esp, x30);

  // get signature handler
  {
    Label L;
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bnez(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ ld(t, Address(xmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == xlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t0,
         "adjust this code");

  // The generated handlers do not touch xmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ jalr(t);
  __ get_method(xmethod);        // slow path can do a GC, reload xmethod

  // result handler is in x10
  // set result handler
  __ mv(result_handler, x10);
  // Save it in the frame in case of preemption; we cannot rely on callee saved registers.
  __ sd(x10, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));

  // pass mirror handle if static call
  {
    Label L;
    __ load_unsigned_short(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_STATIC));
    __ beqz(t0, L);
    // get mirror
    __ load_mirror(t, xmethod, x28, t1);
    // copy mirror into activation frame
    __ sd(t, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ addi(c_rarg1, fp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in x28
  {
    Label L;
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ la(t, unsatisfied);
    __ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8

    __ bne(x28, t1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ get_method(xmethod);
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, xthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  // For convenience we use the pc we want to resume to in
  // case of preemption on Object.wait.
  Label native_return;
  __ set_last_Java_frame(esp, fp, native_return, x30);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ lwu(t, Address(xthread, JavaThread::thread_state_offset()));
    __ mv(t0, (u1)_thread_in_Java);
    __ beq(t, t0, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
  __ mv(t0, _thread_in_native);
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
  __ sw(t0, Address(t1));

  __ push_cont_fastpath();

  // Call the native method.
  __ jalr(x28);

  __ pop_cont_fastpath();

  __ get_method(xmethod);
  // result potentially in x10 or f10

  // Restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(t0);

  // make room for the pushes we're about to do
  __ subi(t0, esp, 4 * wordSize);
  __ andi(sp, t0, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_native_trans);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ membar(MacroAssembler::AnyAny);
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label L, Continue;

    // We need an acquire here to ensure that any subsequent load of the
    // global SafepointSynchronize::_state flag is ordered after this load
    // of the thread-local polling word. We don't want this poll to
    // return false (i.e. not safepointing) and a later poll of the global
    // SafepointSynchronize::_state spuriously to return true.
    //
    // This is to avoid a race when we're in a native->Java transition
    // racing the code which wakes up from a safepoint.
    __ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */);
    __ lwu(t1, Address(xthread, JavaThread::suspend_flags_offset()));
    __ beqz(t1, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mv(c_rarg0, xthread);
    __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ get_method(xmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_Java);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  if (LockingMode != LM_LEGACY) {
    // Check preemption for Object.wait()
    Label not_preempted;
    __ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
    __ beqz(t1, not_preempted);
    __ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
    __ jr(t1);
    __ bind(native_return);
    __ restore_after_resume(true /* is_native */);
    // reload result_handler
    __ ld(result_handler, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));
    __ bind(not_preempted);
  } else {
    // any pc will do, so just use this one for LM_LEGACY to keep code together.
    __ bind(native_return);
  }

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ld(t, Address(xthread, JavaThread::active_handles_offset()));
  __ sd(zr, Address(t, JNIHandleBlock::top_offset()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop;
    __ la(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ bne(t, result_handler, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
    __ pop(ltos);
    __ resolve_jobject(x10, t, t1);
    __ sd(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lwu(t0, Address(xthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ mv(t1, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
    __ bne(t0, t1, no_reguard);

    __ push_call_clobbered_registers();
    __ mv(c_rarg0, xthread);
    __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ pop_call_clobbered_registers();
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(xmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // xbcp == code_base()
  __ ld(xbcp, Address(xmethod, Method::const_offset()));   // get ConstMethod*
  __ add(xbcp, xbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
    __ beqz(t0, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the sp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ load_unsigned_short(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_SYNCHRONIZED));
    __ beqz(t0, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // monitor expected in c_rarg1 for slow unlock path
      __ la(c_rarg1, Address(fp,   // address of first monitor
                             (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                        wordSize - sizeof(BasicObjectLock))));

      __ ld(t, Address(c_rarg1, BasicObjectLock::obj_offset()));
      __ bnez(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

#if INCLUDE_JFR
  __ enter_jfr_critical_section();

  // This poll test is to uphold the invariant that a JFR sampled frame
  // must not return to its caller without a prior safepoint poll check.
  // The earlier poll check in this routine is insufficient for this purpose
  // because the thread has transitioned back to Java.

  Label slow_path;
  Label fast_path;
  __ safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
  __ j(fast_path);

  __ bind(slow_path);
  __ push(dtos);
  __ push(ltos);
  __ set_last_Java_frame(esp, fp, __ pc(), t0);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), xthread);
  __ reset_last_Java_frame(true);
  __ pop(ltos);
  __ pop(dtos);
  __ bind(fast_path);

#endif // INCLUDE_JFR

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  __ pop(ltos);
  __ pop(dtos);

  __ jalr(result_handler);

  // remove activation
  // get sender sp
  __ ld(esp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize));
  // remove frame anchor
  __ leave();

  JFR_ONLY(__ leave_jfr_critical_section();)

  // restore sender sp
  __ mv(sp, esp);

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {

  // determine code generation flags
  const bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

1448   // t0: sender sp
1449   address entry_point = __ pc();
1450 
1451   const Address constMethod(xmethod, Method::const_offset());
1452   const Address access_flags(xmethod, Method::access_flags_offset());
1453   const Address size_of_parameters(x13,
1454                                    ConstMethod::size_of_parameters_offset());
1455   const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());
1456 
1457   // get parameter size (always needed)
1458   // need to load the const method first
1459   __ ld(x13, constMethod);
1460   __ load_unsigned_short(x12, size_of_parameters);
1461 
1462   // x12: size of parameters
1463 
1464   __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
1465   __ sub(x13, x13, x12); // x13 = no. of additional locals
1466 
1467   // see if we've got enough room on the stack for locals plus overhead.
1468   generate_stack_overflow_check();
1469 
1470   // compute beginning of parameters (xlocals)
1471   __ shadd(xlocals, x12, esp, t1, 3);
1472   __ subi(xlocals, xlocals, wordSize);
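       // xlocals now points to the first parameter word (the highest-addressed
       // local slot); locals are addressed at decreasing addresses from here.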
1473 
1474   // Make room for additional locals
1475   __ slli(t1, x13, 3);
1476   __ sub(t0, esp, t1);
1477 
1478   // Padding between locals and fixed part of activation frame to ensure
1479   // SP is always 16-byte aligned.
1480   __ andi(sp, t0, -16);
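       // (t0 & -16) rounds the stack pointer down to a 16-byte boundary,
       // e.g. 0x1038 -> 0x1030.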
1481 
1482   // x13 - # of additional locals
1483   // allocate space for locals
1484   // explicitly initialize locals
1485   {
1486     Label exit, loop;
1487     __ blez(x13, exit); // do nothing if x13 <= 0
1488     __ bind(loop);
1489     __ sd(zr, Address(t0));
1490     __ addi(t0, t0, wordSize);
1491     __ subi(x13, x13, 1); // until everything initialized
1492     __ bnez(x13, loop);
1493     __ bind(exit);
1494   }
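       // Equivalent C sketch of the loop above (illustrative only):
       //   for (; x13 > 0; x13--) { *(intptr_t*)t0 = 0; t0 += wordSize; }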
1495 
1496   // Load the base of the dispatch table into xdispatch
1497   __ get_dispatch();
1498 
1499   // initialize fixed part of activation frame
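       // ('false' selects the layout for a non-native, bytecode-executing frame)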
1500   generate_fixed_frame(false);
1501 
1502   // make sure method is not native & not abstract
1503 #ifdef ASSERT
1504   __ load_unsigned_short(x10, access_flags);
1505   __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute native method as non-native");
1506   __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
1507 #endif
1508 
1509   // Since at this point in the method invocation the exception
1510   // handler would try to exit the monitor of a synchronized method
1511   // which has not been entered yet, we set the thread local variable
1512   // _do_not_unlock_if_synchronized to true. The remove_activation
1513   // code will check this flag.
1514 
1515   const Address do_not_unlock_if_synchronized(xthread,
1516                                               in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1517   __ mv(t1, true);
1518   __ sb(t1, do_not_unlock_if_synchronized);
1519 
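       // If the method has a MethodData area, point mdp at its data section
       // and profile the parameter types.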
1520   Label no_mdp;
1521   const Register mdp = x13;
1522   __ ld(mdp, Address(xmethod, Method::method_data_offset()));
1523   __ beqz(mdp, no_mdp);
1524   __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
1525   __ profile_parameters_type(mdp, x11, x12, x14); // use x11, x12, x14 as tmp registers
1526   __ bind(no_mdp);
1527 
1528   // increment invocation count & check for overflow
1529   Label invocation_counter_overflow;
1530   if (inc_counter) {
1531     generate_counter_incr(&invocation_counter_overflow);
1532   }
1533 
1534   Label continue_after_compile;
1535   __ bind(continue_after_compile);
1536 
1537   bang_stack_shadow_pages(false);
1538 
1539   // reset the _do_not_unlock_if_synchronized flag
1540   __ sb(zr, do_not_unlock_if_synchronized);
1541 
1542   // check for synchronized methods
1543   // Must happen AFTER invocation_counter check and stack overflow check,
1544   // so the method is not locked if the counter overflows.
1545   if (synchronized) {
1546     // Allocate monitor and lock method
1547     lock_method();
1548   } else {
1549     // no synchronization necessary
1550 #ifdef ASSERT
1551     __ load_unsigned_short(x10, access_flags);
1552     __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
1553 #endif
1554   }
1555 
1556   // start execution
1557 #ifdef ASSERT
1558   __ verify_frame_setup();
1559 #endif
1560 
1561   // jvmti support
1562   __ notify_method_entry();
1563 
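       // Fetch the first bytecode of the method and jump to its table entry;
       // interpretation of the method body starts here.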
1564   __ dispatch_next(vtos);
1565 
1566   // invocation counter overflow
1567   if (inc_counter) {
1568     // Handle overflow of counter and compile method
1569     __ bind(invocation_counter_overflow);
1570     generate_counter_overflow(continue_after_compile);
1571   }
1572 
1573   return entry_point;
1574 }
1575 
1576 // Method entry for java.lang.Thread.currentThread
1577 address TemplateInterpreterGenerator::generate_currentThread() {
1578   address entry_point = __ pc();
1579 
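       // Load the JavaThread's _vthread OopHandle and resolve it to the
       // current java.lang.Thread oop.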
1580   __ ld(x10, Address(xthread, JavaThread::vthread_offset()));
1581   __ resolve_oop_handle(x10, t0, t1);
1582   __ ret();
1583 
1584   return entry_point;
1585 }
1586 
1587 //-----------------------------------------------------------------------------
1588 // Exceptions
1589 
1590 void TemplateInterpreterGenerator::generate_throw_exception() {
1591   // Entry point in previous activation (i.e., if the caller was
1592   // interpreted)
1593   Interpreter::_rethrow_exception_entry = __ pc();
1594   // Clear interpreter_frame_last_sp (the expression stack is about to
1595   // be emptied for the exception processing anyway).
1596   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
1597   // x10: exception
1598   // x13: return address/pc that threw exception
1599   __ restore_bcp();    // xbcp points to call/send
1600   __ restore_locals();
1601   __ restore_constant_pool_cache();
1602   __ reinit_heapbase();  // restore xheapbase as heapbase.
1603   __ get_dispatch();
1604 
1605   // Entry point for exceptions thrown within interpreter code
1606   Interpreter::_throw_exception_entry = __ pc();
1607   // If we came here via a NullPointerException on the receiver of a
1608   // method, xmethod may be corrupt.
1609   __ get_method(xmethod);
1610   // expression stack is undefined here
1611   // x10: exception
1612   // xbcp: exception bcp
1613   __ verify_oop(x10);
1614   __ mv(c_rarg1, x10);
1615 
1616   // expression stack must be empty before entering the VM in case of
1617   // an exception
1618   __ empty_expression_stack();
1619   // find exception handler address and preserve exception oop
1620   __ call_VM(x13,
1621              CAST_FROM_FN_PTR(address,
1622                           InterpreterRuntime::exception_handler_for_exception),
1623              c_rarg1);
1624 
1625   // Restore machine SP
1626   __ restore_sp_after_call();
1627 
1628   // x10: exception handler entry point
1629   // x13: preserved exception oop
1630   // xbcp: bcp for exception handler
1631   __ push_ptr(x13); // push exception which is now the only value on the stack
1632   __ jr(x10); // jump to exception handler (may be _remove_activation_entry!)
1633 
1634   // If the exception is not handled in the current frame the frame is
1635   // removed and the exception is rethrown (i.e. exception
1636   // continuation is _rethrow_exception).
1637   //
1638   // Note: At this point the bci is still the bci of the instruction
1639   // which caused the exception and the expression stack is
1640   // empty. Thus, for any VM calls at this point, GC will find a legal
1641   // oop map (with empty expression stack).
1642 
1643   //
1644   // JVMTI PopFrame support
1645   //
1646 
1647   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1648   __ empty_expression_stack();
1649   // Set the popframe_processing bit in pending_popframe_condition
1650   // indicating that we are currently handling popframe, so that
1651   // call_VMs that may happen later do not trigger new popframe
1652   // handling cycles.
1653   __ lwu(x13, Address(xthread, JavaThread::popframe_condition_offset()));
1654   __ ori(x13, x13, JavaThread::popframe_processing_bit);
1655   __ sw(x13, Address(xthread, JavaThread::popframe_condition_offset()));
1656 
1657   {
1658     // Check to see whether we are returning to a deoptimized frame.
1659     // (The PopFrame call ensures that the caller of the popped frame is
1660     // either interpreted or compiled and deoptimizes it if compiled.)
1661     // In this case, we can't call dispatch_next() after the frame is
1662     // popped, but instead must save the incoming arguments and restore
1663     // them after deoptimization has occurred.
1664     //
1665     // Note that we don't compare the return PC against the
1666     // deoptimization blob's unpack entry because of the presence of
1667     // adapter frames in C2.
1668     Label caller_not_deoptimized;
1669     __ ld(c_rarg1, Address(fp, frame::return_addr_offset * wordSize));
1670     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), c_rarg1);
1671     __ bnez(x10, caller_not_deoptimized);
1672 
1673     // Compute size of arguments for saving when returning to
1674     // deoptimized caller
1675     __ get_method(x10);
1676     __ ld(x10, Address(x10, Method::const_offset()));
1677     __ load_unsigned_short(x10, Address(x10, in_bytes(ConstMethod::
1678                                                       size_of_parameters_offset())));
1679     __ slli(x10, x10, Interpreter::logStackElementSize);
1680     __ restore_locals();
1681     __ sub(xlocals, xlocals, x10);
1682     __ addi(xlocals, xlocals, wordSize);
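         // xlocals now holds the lowest-addressed argument slot and x10 the
         // size of the arguments in bytes; together they describe the span
         // that popframe_preserve_args copies to the side.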
1683     // Save these arguments
1684     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1685                                            Deoptimization::
1686                                            popframe_preserve_args),
1687                           xthread, x10, xlocals);
1688 
1689     __ remove_activation(vtos,
1690                          /* throw_monitor_exception */ false,
1691                          /* install_monitor_exception */ false,
1692                          /* notify_jvmdi */ false);
1693 
1694     // Inform deoptimization that it is responsible for restoring
1695     // these arguments
1696     __ mv(t0, JavaThread::popframe_force_deopt_reexecution_bit);
1697     __ sw(t0, Address(xthread, JavaThread::popframe_condition_offset()));
1698 
1699     // Continue in deoptimization handler
1700     __ ret();
1701 
1702     __ bind(caller_not_deoptimized);
1703   }
1704 
1705   __ remove_activation(vtos,
1706                        /* throw_monitor_exception */ false,
1707                        /* install_monitor_exception */ false,
1708                        /* notify_jvmdi */ false);
1709 
1710   // Restore the last_sp and null it out
1711   __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
1712   __ shadd(esp, t0, fp, t0, LogBytesPerWord);
1713   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
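       // last_sp is stored as a word offset relative to fp, hence the
       // scaled add above to reconstruct esp.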
1714 
1715   __ restore_bcp();
1716   __ restore_locals();
1717   __ restore_constant_pool_cache();
1718   __ get_method(xmethod);
1719   __ get_dispatch();
1720 
1721   // The method data pointer was incremented already during
1722   // call profiling. We have to restore the mdp for the current bcp.
1723   if (ProfileInterpreter) {
1724     __ set_method_data_pointer_for_bcp();
1725   }
1726 
1727   // Clear the popframe condition flag
1728   __ sw(zr, Address(xthread, JavaThread::popframe_condition_offset()));
1729   assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
1730 
1731 #if INCLUDE_JVMTI
1732   {
1733     Label L_done;
1734 
1735     __ lbu(t0, Address(xbcp, 0));
1736     __ mv(t1, Bytecodes::_invokestatic);
1737     __ bne(t1, t0, L_done);
1738 
1739     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1740     // Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
1741 
1742     __ ld(c_rarg0, Address(xlocals, 0));
1743     __ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, xmethod, xbcp);
1744 
1745     __ beqz(x10, L_done);
1746 
1747     __ sd(x10, Address(esp, 0));
1748     __ bind(L_done);
1749   }
1750 #endif // INCLUDE_JVMTI
1751 
1752   // Restore machine SP
1753   __ restore_sp_after_call();
1754 
1755   __ dispatch_next(vtos);
1756   // end of PopFrame support
1757 
1758   Interpreter::_remove_activation_entry = __ pc();
1759 
1760   // preserve exception over this code sequence
1761   __ pop_ptr(x10);
1762   __ sd(x10, Address(xthread, JavaThread::vm_result_oop_offset()));
1763   // remove the activation (without doing throws on illegalMonitorExceptions)
1764   __ remove_activation(vtos, false, true, false);
1765   // restore exception
1766   __ get_vm_result_oop(x10, xthread);
1767 
1768   // In between activations - previous activation type unknown yet
1769   // compute continuation point - the continuation point expects the
1770   // following registers set up:
1771   //
1772   // x10: exception
1773   // ra: return address/pc that threw exception
1774   // sp: expression stack of caller
1775   // fp: fp of caller
1776   // FIXME: There's no point saving ra here because VM calls don't trash it
1777   __ subi(sp, sp, 2 * wordSize);
1778   __ sd(x10, Address(sp, 0));                   // save exception
1779   __ sd(ra, Address(sp, wordSize));             // save return address
1780   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1781                                          SharedRuntime::exception_handler_for_return_address),
1782                         xthread, ra);
1783   __ mv(x11, x10);                              // save exception handler
1784   __ ld(x10, Address(sp, 0));                   // restore exception
1785   __ ld(ra, Address(sp, wordSize));             // restore return address
1786   __ addi(sp, sp, 2 * wordSize);
1787   // We might be returning to a deopt handler that expects x13 to
1788   // contain the exception pc
1789   __ mv(x13, ra);
1790   // Note that an "issuing PC" is actually the next PC after the call
1791   __ jr(x11);                                   // jump to exception
1792                                                 // handler of caller
1793 }
1794 
1795 //
1796 // JVMTI ForceEarlyReturn support
1797 //
1798 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state)  {
1799   address entry = __ pc();
1800 
1801   __ restore_bcp();
1802   __ restore_locals();
1803   __ empty_expression_stack();
1804   __ load_earlyret_value(state);
1805 
1806   __ ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
1807   Address cond_addr(t0, JvmtiThreadState::earlyret_state_offset());
1808 
1809   // Clear the earlyret state
1810   assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
1811   __ sd(zr, cond_addr);
1812 
1813   __ remove_activation(state,
1814                        false, /* throw_monitor_exception */
1815                        false, /* install_monitor_exception */
1816                        true); /* notify_jvmdi */
1817   __ ret();
1818 
1819   return entry;
1820 }
1821 // end of ForceEarlyReturn support
1822 
1823 //-----------------------------------------------------------------------------
1824 // Helper for vtos entry point generation
1825 
1826 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
1827                                                          address& bep,
1828                                                          address& cep,
1829                                                          address& sep,
1830                                                          address& aep,
1831                                                          address& iep,
1832                                                          address& lep,
1833                                                          address& fep,
1834                                                          address& dep,
1835                                                          address& vep) {
1836   assert(t != nullptr && t->is_valid() && t->tos_in() == vtos, "illegal template");
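       // Each non-vtos entry point pushes the value in the corresponding
       // tos register onto the expression stack and continues at the common
       // vtos entry point L below.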
1837   Label L;
1838   aep = __ pc();     // atos entry point
1839       __ push_ptr();
1840       __ j(L);
1841   fep = __ pc();     // ftos entry point
1842       __ push_f();
1843       __ j(L);
1844   dep = __ pc();     // dtos entry point
1845       __ push_d();
1846       __ j(L);
1847   lep = __ pc();     // ltos entry point
1848       __ push_l();
1849       __ j(L);
1850   bep = cep = sep = iep = __ pc();     // [bcsi]tos entry point
1851       __ push_i();
1852   vep = __ pc();     // vtos entry point
1853   __ bind(L);
1854   generate_and_dispatch(t);
1855 }
1856 
1857 //-----------------------------------------------------------------------------
1858 
1859 // Non-product code
1860 #ifndef PRODUCT
1861 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1862   address entry = __ pc();
1863 
1864   __ push_reg(ra);
1865   __ push(state);
1866   __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
1867   __ mv(c_rarg2, x10);  // Pass itos
1868   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
1869   __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
1870   __ pop(state);
1871   __ pop_reg(ra);
1872   __ ret();                                   // return from result handler
1873 
1874   return entry;
1875 }
1876 #endif // !PRODUCT
1877 
1878 void TemplateInterpreterGenerator::count_bytecode() {
1879   __ mv(x7, (address) &BytecodeCounter::_counter_value);
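       // Atomically bump the 64-bit counter in memory; the previous value
       // is discarded (noreg).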
1880   __ atomic_add(noreg, 1, x7);
1881 }
1882 
1883 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1884   __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
1885   __ atomic_addw(noreg, 1, x7);
1886 }
1887 
1888 #ifndef PRODUCT
1889 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1890   // Calculate new index for counter:
1891   //   _index = (_index >> log2_number_of_codes) |
1892   //            (bytecode << log2_number_of_codes);
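       // The resulting index packs the previous bytecode into the low bits
       // and the current bytecode into the high bits.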
1893   Register index_addr = t1;
1894   Register index = t0;
1895   __ mv(index_addr, (address) &BytecodePairHistogram::_index);
1896   __ lw(index, index_addr);
1897   __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1898   __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
1899   __ orrw(index, x7, index);
1900   __ sw(index, index_addr);
1901   // Bump bucket contents:
1902   //   _counters[_index] ++;
1903   Register counter_addr = t1;
1904   __ mv(x7, (address) &BytecodePairHistogram::_counters);
1905   __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
1906   __ atomic_addw(noreg, 1, counter_addr);
1907 }
1908 
1909 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1910   // Call a little run-time stub to avoid blow-up for each bytecode.
1911   // The run-time stub saves the right registers, depending on
1912   // the tosca in-state for the given template.
1913 
1914   assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated");
1915   __ rt_call(Interpreter::trace_code(t->tos_in()));
1916   __ reinit_heapbase();
1917 }
1918 
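     // Support for -XX:StopInterpreterAt=<n> (non-product builds): trap into
     // the debugger via ebreak once the global bytecode counter reaches n.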
1919 void TemplateInterpreterGenerator::stop_interpreter_at() {
1920   Label L;
1921   __ push_reg(t0);
1922   __ mv(t0, (address) &BytecodeCounter::_counter_value);
1923   __ ld(t0, Address(t0));
1924   __ mv(t1, StopInterpreterAt);
1925   __ bne(t0, t1, L);
1926   __ ebreak();
1927   __ bind(L);
1928   __ pop_reg(t0);
1929 }
1930 
1931 #endif // !PRODUCT