1 /*
   2  * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "classfile/javaClasses.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interpreter/bytecodeHistogram.hpp"
  32 #include "interpreter/bytecodeTracer.hpp"
  33 #include "interpreter/interp_masm.hpp"
  34 #include "interpreter/interpreter.hpp"
  35 #include "interpreter/interpreterRuntime.hpp"
  36 #include "interpreter/templateInterpreterGenerator.hpp"
  37 #include "interpreter/templateTable.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/arrayOop.hpp"
  40 #include "oops/method.inline.hpp"
  41 #include "oops/methodData.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "oops/resolvedIndyEntry.hpp"
  44 #include "oops/resolvedMethodEntry.hpp"
  45 #include "prims/jvmtiExport.hpp"
  46 #include "prims/jvmtiThreadState.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/deoptimization.hpp"
  49 #include "runtime/frame.inline.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/jniHandles.hpp"
  52 #include "runtime/sharedRuntime.hpp"
  53 #include "runtime/stubRoutines.hpp"
  54 #include "runtime/synchronizer.hpp"
  55 #include "runtime/timer.hpp"
  56 #include "runtime/vframeArray.hpp"
  57 #include "utilities/checkedCast.hpp"
  58 #include "utilities/debug.hpp"
  59 #include "utilities/powerOfTwo.hpp"
  60 #include <sys/types.h>
  61 
  62 #ifndef PRODUCT
  63 #include "oops/method.hpp"
  64 #endif // !PRODUCT
  65 
// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with -XX:+PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI.
  71 int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;
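
// Usage sketch (hedged): depending on how PrintInterpreter is declared it
// may require -XX:+UnlockDiagnosticVMOptions or a debug build; an assumed
// invocation that prints the generated interpreter code and its size:
//
//   java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version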
  72 
  73 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
  74 
  75 //-----------------------------------------------------------------------------
  76 
  77 address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  78   address entry = __ pc();
  79 
  80   __ andi(esp, esp, -16);
  81   __ mv(c_rarg3, esp);
  82   // xmethod
  83   // xlocals
  84   // c_rarg3: first stack arg - wordSize
  85   // adjust sp
  86 
  87   __ subi(sp, c_rarg3, 18 * wordSize);
  88   __ subi(sp, sp, 2 * wordSize);
  89   __ sd(ra, Address(sp, 0));
  90 
  91   __ call_VM(noreg,
  92              CAST_FROM_FN_PTR(address,
  93                               InterpreterRuntime::slow_signature_handler),
  94              xmethod, xlocals, c_rarg3);
  95 
  96   // x10: result handler
  97 
  98   // Stack layout:
  99   // sp: return address           <- sp
 100   //      1 garbage
 101   //      8 integer args (if static first is unused)
  //      1 word of float/double identifiers (bitmask)
 103   //      8 double args
 104   //        stack args              <- esp
 105   //        garbage
 106   //        expression stack bottom
 107   //        bcp (null)
 108   //        ...
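  // (1 garbage + 8 integer + 1 identifier + 8 double = 18 words, matching
  //  the 18 * wordSize adjustments made to sp above and below.)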
 109 
 110   // Restore ra
 111   __ ld(ra, Address(sp, 0));
  __ addi(sp, sp, 2 * wordSize);
 113 
 114   // Do FP first so we can use c_rarg3 as temp
 115   __ lwu(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers
 116 
 117   for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
 118     const FloatRegister r = g_FPArgReg[i];
 119     Label d, done;
 120 
 121     __ test_bit(t0, c_rarg3, i);
 122     __ bnez(t0, d);
 123     __ flw(r, Address(sp, (10 + i) * wordSize));
 124     __ j(done);
 125     __ bind(d);
 126     __ fld(r, Address(sp, (10 + i) * wordSize));
 127     __ bind(done);
 128   }
 129 
 130   // c_rarg0 contains the result from the call of
 131   // InterpreterRuntime::slow_signature_handler so we don't touch it
 132   // here.  It will be loaded with the JNIEnv* later.
 133   for (int i = 1; i < Argument::n_int_register_parameters_c; i++) {
 134     const Register rm = g_INTArgReg[i];
 135     __ ld(rm, Address(sp, i * wordSize));
 136   }
 137 
 138   __ addi(sp, sp, 18 * wordSize);
 139   __ ret();
 140 
 141   return entry;
 142 }
 143 
 144 // Various method entries
 145 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
 146   // xmethod: Method*
 147   // x19_sender_sp: sender sp
 148   // esp: args
 149 
  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we add an intrinsic which is virtually callable,
  // we'll have to worry about how to safepoint so that this code is used.
 154 
 155   // mathematical functions inlined by compiler
 156   // (interpreter must provide identical implementation
 157   // in order to avoid monotonicity bugs when switching
 158   // from interpreter to compiler in the middle of some
 159   // computation)
 160   //
 161   // stack:
 162   //        [ arg ] <-- esp
 163   //        [ arg ]
 164   // retaddr in ra
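
  // To make the layout concrete, a hedged sketch for Math.sqrt(double):
  // the single double argument occupies two expression-stack slots
  // starting at esp, so it is loaded with fld from Address(esp), and the
  // result is returned in f10 per the RISC-V FP calling convention. For
  // the two-argument Math.pow(double, double), the first argument sits two
  // stack elements above the second, hence the load from
  // Address(esp, 2 * Interpreter::stackElementSize) in that case below.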
 165 
 166   address fn = nullptr;
 167   address entry_point = nullptr;
 168   switch (kind) {
 169     case Interpreter::java_lang_math_abs:
 170       entry_point = __ pc();
 171       __ fld(f10, Address(esp));
 172       __ fabs_d(f10, f10);
 173       __ mv(sp, x19_sender_sp); // Restore caller's SP
 174       break;
 175     case Interpreter::java_lang_math_sqrt:
 176       entry_point = __ pc();
 177       __ fld(f10, Address(esp));
 178       __ fsqrt_d(f10, f10);
 179       __ mv(sp, x19_sender_sp);
 180       break;
 181     case Interpreter::java_lang_math_sin :
 182       entry_point = __ pc();
 183       __ fld(f10, Address(esp));
 184       __ mv(sp, x19_sender_sp);
 185       __ mv(x9, ra);
 186       if (StubRoutines::dsin() == nullptr) {
 187         fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
 188       } else {
 189         fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
 190       }
 191       __ rt_call(fn);
 192       __ mv(ra, x9);
 193       break;
 194     case Interpreter::java_lang_math_cos :
 195       entry_point = __ pc();
 196       __ fld(f10, Address(esp));
 197       __ mv(sp, x19_sender_sp);
 198       __ mv(x9, ra);
 199       if (StubRoutines::dcos() == nullptr) {
 200         fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
 201       } else {
 202         fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
 203       }
 204       __ rt_call(fn);
 205       __ mv(ra, x9);
 206       break;
 207     case Interpreter::java_lang_math_tan :
 208       entry_point = __ pc();
 209       __ fld(f10, Address(esp));
 210       __ mv(sp, x19_sender_sp);
 211       __ mv(x9, ra);
 212       if (StubRoutines::dtan() == nullptr) {
 213         fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
 214       } else {
 215         fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
 216       }
 217       __ rt_call(fn);
 218       __ mv(ra, x9);
 219       break;
 220     case Interpreter::java_lang_math_log :
 221       entry_point = __ pc();
 222       __ fld(f10, Address(esp));
 223       __ mv(sp, x19_sender_sp);
 224       __ mv(x9, ra);
 225       if (StubRoutines::dlog() == nullptr) {
 226         fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
 227       } else {
 228         fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
 229       }
 230       __ rt_call(fn);
 231       __ mv(ra, x9);
 232       break;
 233     case Interpreter::java_lang_math_log10 :
 234       entry_point = __ pc();
 235       __ fld(f10, Address(esp));
 236       __ mv(sp, x19_sender_sp);
 237       __ mv(x9, ra);
 238       if (StubRoutines::dlog10() == nullptr) {
 239         fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
 240       } else {
 241         fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
 242       }
 243       __ rt_call(fn);
 244       __ mv(ra, x9);
 245       break;
 246     case Interpreter::java_lang_math_exp :
 247       entry_point = __ pc();
 248       __ fld(f10, Address(esp));
 249       __ mv(sp, x19_sender_sp);
 250       __ mv(x9, ra);
 251       if (StubRoutines::dexp() == nullptr) {
 252         fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
 253       } else {
 254         fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
 255       }
 256       __ rt_call(fn);
 257       __ mv(ra, x9);
 258       break;
 259     case Interpreter::java_lang_math_pow :
 260       entry_point = __ pc();
 261       __ mv(x9, ra);
 262       __ fld(f10, Address(esp, 2 * Interpreter::stackElementSize));
 263       __ fld(f11, Address(esp));
 264       __ mv(sp, x19_sender_sp);
 265       if (StubRoutines::dpow() == nullptr) {
 266         fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
 267       } else {
 268         fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
 269       }
 270       __ rt_call(fn);
 271       __ mv(ra, x9);
 272       break;
 273     case Interpreter::java_lang_math_fmaD :
 274       if (UseFMA) {
 275         entry_point = __ pc();
 276         __ fld(f10, Address(esp, 4 * Interpreter::stackElementSize));
 277         __ fld(f11, Address(esp, 2 * Interpreter::stackElementSize));
 278         __ fld(f12, Address(esp));
 279         __ fmadd_d(f10, f10, f11, f12);
 280         __ mv(sp, x19_sender_sp); // Restore caller's SP
 281       }
 282       break;
 283     case Interpreter::java_lang_math_fmaF :
 284       if (UseFMA) {
 285         entry_point = __ pc();
 286         __ flw(f10, Address(esp, 2 * Interpreter::stackElementSize));
 287         __ flw(f11, Address(esp, Interpreter::stackElementSize));
 288         __ flw(f12, Address(esp));
 289         __ fmadd_s(f10, f10, f11, f12);
 290         __ mv(sp, x19_sender_sp); // Restore caller's SP
 291       }
 292       break;
 293     default:
 294       ;
 295   }
 296   if (entry_point != nullptr) {
 297     __ ret();
 298   }
 299 
 300   return entry_point;
 301 }
 302 
// Abstract method entry
// Attempts to execute an abstract method; throws AbstractMethodError.
 305 address TemplateInterpreterGenerator::generate_abstract_entry(void) {
 306   // xmethod: Method*
 307   // x19_sender_sp: sender SP
 308 
 309   address entry_point = __ pc();
 310 
 311   // abstract method entry
 312 
 313   //  pop return address, reset last_sp to null
 314   __ empty_expression_stack();
 315   __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
 316   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
 317 
 318   // throw exception
 319   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
 320                                      InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
 321                                      xmethod);
 322   // the call_VM checks for exception, so we should never return here.
 323   __ should_not_reach_here();
 324 
 325   return entry_point;
 326 }
 327 
 328 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
 329   address entry = __ pc();
 330 
 331 #ifdef ASSERT
 332   {
 333     Label L;
 334     __ ld(t0, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
 335     __ shadd(t0, t0, fp, t0, LogBytesPerWord);
    // maximal sp for current fp (stack grows downwards)
    // check if frame is complete
    __ bge(t0, sp, L);
    __ stop("interpreter frame not set up");
 340     __ bind(L);
 341   }
 342 #endif // ASSERT
 343   // Restore bcp under the assumption that the current frame is still
 344   // interpreted
 345   __ restore_bcp();
 346 
 347   // expression stack must be empty before entering the VM if an
 348   // exception happened
 349   __ empty_expression_stack();
 350   // throw exception
 351   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
 352   return entry;
 353 }
 354 
 355 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
 356   address entry = __ pc();
 357   // expression stack must be empty before entering the VM if an
 358   // exception happened
 359   __ empty_expression_stack();
 360   // setup parameters
 361 
 362   // convention: expect aberrant index in register x11
 363   __ zext(c_rarg2, x11, 32);
 364   // convention: expect array in register x13
 365   __ mv(c_rarg1, x13);
 366   __ call_VM(noreg,
 367              CAST_FROM_FN_PTR(address,
 368                               InterpreterRuntime::
 369                               throw_ArrayIndexOutOfBoundsException),
 370              c_rarg1, c_rarg2);
 371   return entry;
 372 }
 373 
 374 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
 375   address entry = __ pc();
 376 
 377   // object is at TOS
 378   __ pop_reg(c_rarg1);
 379 
 380   // expression stack must be empty before entering the VM if an
 381   // exception happened
 382   __ empty_expression_stack();
 383 
 384   __ call_VM(noreg,
 385              CAST_FROM_FN_PTR(address,
 386                               InterpreterRuntime::
 387                               throw_ClassCastException),
 388              c_rarg1);
 389   return entry;
 390 }
 391 
 392 address TemplateInterpreterGenerator::generate_exception_handler_common(
 393   const char* name, const char* message, bool pass_oop) {
 394   assert(!pass_oop || message == nullptr, "either oop or message but not both");
 395   address entry = __ pc();
 396   if (pass_oop) {
 397     // object is at TOS
 398     __ pop_reg(c_rarg2);
 399   }
 400   // expression stack must be empty before entering the VM if an
 401   // exception happened
 402   __ empty_expression_stack();
 403   // setup parameters
 404   __ la(c_rarg1, Address((address)name));
 405   if (pass_oop) {
 406     __ call_VM(x10, CAST_FROM_FN_PTR(address,
 407                                      InterpreterRuntime::
 408                                      create_klass_exception),
 409                c_rarg1, c_rarg2);
 410   } else {
    // Kind of lame: ExternalAddress can't take null because
    // external_word_Relocation will assert.
 413     if (message != nullptr) {
 414       __ la(c_rarg2, Address((address)message));
 415     } else {
 416       __ mv(c_rarg2, NULL_WORD);
 417     }
 418     __ call_VM(x10,
 419                CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
 420                c_rarg1, c_rarg2);
 421   }
 422   // throw exception
 423   __ j(RuntimeAddress(Interpreter::throw_exception_entry()));
 424   return entry;
 425 }
 426 
 427 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
 428   address entry = __ pc();
 429 
 430   // Restore stack bottom in case i2c adjusted stack
 431   __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
 433   // and null it as marker that esp is now tos until next java call
 434   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
 435   __ restore_bcp();
 436   __ restore_locals();
 437   __ restore_constant_pool_cache();
 438   __ get_method(xmethod);
 439 
 440   if (state == atos) {
 441     Register obj = x10;
 442     Register mdp = x11;
 443     Register tmp = x12;
 444     __ ld(mdp, Address(xmethod, Method::method_data_offset()));
 445     __ profile_return_type(mdp, obj, tmp);
 446   }
 447 
 448   const Register cache = x11;
 449   const Register index = x12;
 450 
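  // A hedged note on the two cases below: invokedynamic call sites carry a
  // 4-byte index into the resolved-indy table (ResolvedIndyEntry); all
  // other invokes carry a 2-byte index into the resolved-method table
  // (ResolvedMethodEntry). Either way the entry's parameter count is what
  // lets us pop the callee's arguments off the expression stack, roughly:
  //
  //   esp += num_parameters << 3;   // the shadd with a shift of 3 below
  //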
 451   if (index_size == sizeof(u4)) {
 452     __ load_resolved_indy_entry(cache, index);
 453     __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
 454     __ shadd(esp, cache, esp, t0, 3);
 455   } else {
 456     // Pop N words from the stack
 457     assert(index_size == sizeof(u2), "Can only be u2");
 458     __ load_method_entry(cache, index);
 459     __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
 460 
 461     __ shadd(esp, cache, esp, t0, 3);
 462   }
 463 
 464   // Restore machine SP
 465   __ restore_sp_after_call();
 466 
 467   __ check_and_handle_popframe(xthread);
 468   __ check_and_handle_earlyret(xthread);
 469 
 470   __ get_dispatch();
 471   __ dispatch_next(state, step);
 472 
 473   return entry;
 474 }
 475 
 476 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
 477                                                                int step,
 478                                                                address continuation) {
 479   address entry = __ pc();
 480   __ restore_bcp();
 481   __ restore_locals();
 482   __ restore_constant_pool_cache();
 483   __ get_method(xmethod);
 484   __ get_dispatch();
 485 
 486   __ restore_sp_after_call();  // Restore SP to extended SP
 487 
 488   // Restore expression stack pointer
 489   __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
 491   // null last_sp until next java call
 492   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
 493 
 494   // handle exceptions
 495   {
 496     Label L;
 497     __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
 498     __ beqz(t0, L);
 499     __ call_VM(noreg,
 500                CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
 501     __ should_not_reach_here();
 502     __ bind(L);
 503   }
 504 
 505   if (continuation == nullptr) {
 506     __ dispatch_next(state, step);
 507   } else {
 508     __ jump_to_entry(continuation);
 509   }
 510   return entry;
 511 }
 512 
 513 address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
 514   address entry = __ pc();
 515   if (type == T_OBJECT) {
 516     // retrieve result from frame
 517     __ ld(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
 518     // and verify it
 519     __ verify_oop(x10);
 520   } else {
    __ cast_primitive_type(type, x10);
 522   }
 523 
 524   __ ret();                                  // return from result handler
 525   return entry;
 526 }
 527 
 528 address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
 529                                                                 address runtime_entry) {
 530   assert_cond(runtime_entry != nullptr);
 531   address entry = __ pc();
 532   __ push(state);
 533   __ push_cont_fastpath(xthread);
 534   __ call_VM(noreg, runtime_entry);
 535   __ pop_cont_fastpath(xthread);
 536   __ membar(MacroAssembler::AnyAny);
 537   __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
 538   return entry;
 539 }
 540 
 541 address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
 542   if (!Continuations::enabled()) return nullptr;
 543   address start = __ pc();
 544 
 545   __ restore_bcp();
 546   __ restore_locals();
 547 
 548   // Restore constant pool cache
 549   __ ld(xcpool, Address(fp, frame::interpreter_frame_cache_offset * wordSize));
 550 
 551   // Restore Java expression stack pointer
 552   __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
 553   __ shadd(esp, t0, fp, t0, Interpreter::logStackElementSize);
 554   // and null it as marker that esp is now tos until next java call
 555   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
 556 
 557   // Restore machine SP
 558   __ ld(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
 559   __ shadd(sp, t0, fp, t0, LogBytesPerWord);
 560 
 561   // Restore method
 562   __ ld(xmethod, Address(fp, frame::interpreter_frame_method_offset * wordSize));
 563 
 564   // Restore dispatch
 565   __ la(xdispatch, ExternalAddress((address)Interpreter::dispatch_table()));
 566 
 567   __ ret();
 568 
 569   return start;
 570 }
 571 
 572 
// Helpers for commoning out cases in the various types of method entries.
 574 //
 575 
 576 
 577 // increment invocation count & check for overflow
 578 //
 579 // Note: checking for negative value instead of overflow
 580 //       so we have a 'sticky' overflow test
 581 //
 582 // xmethod: method
 583 //
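// A minimal sketch of what increment_mask_and_jump does below (an
// illustration of the control flow, not the generated assembly):
//
//   counter += increment;                 // bump the counter
//   if ((counter & mask) == 0) {          // mask encodes the threshold
//     goto *overflow;                     // take the overflow path
//   }
//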
 584 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
 585   Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
 587   int increment = InvocationCounter::count_increment;
 588   Label no_mdo;
 589   if (ProfileInterpreter) {
 590     // Are we profiling?
 591     __ ld(x10, Address(xmethod, Method::method_data_offset()));
 592     __ beqz(x10, no_mdo);
 593     // Increment counter in the MDO
 594     const Address mdo_invocation_counter(x10, in_bytes(MethodData::invocation_counter_offset()) +
 595                                          in_bytes(InvocationCounter::counter_offset()));
 596     const Address mask(x10, in_bytes(MethodData::invoke_mask_offset()));
 597     __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, t0, t1, false, overflow);
 598     __ j(done);
 599   }
 600   __ bind(no_mdo);
 601   // Increment counter in MethodCounters
 602   const Address invocation_counter(t1,
 603                                    MethodCounters::invocation_counter_offset() +
 604                                    InvocationCounter::counter_offset());
 605   __ get_method_counters(xmethod, t1, done);
 606   const Address mask(t1, in_bytes(MethodCounters::invoke_mask_offset()));
 607   __ increment_mask_and_jump(invocation_counter, increment, mask, t0, x11, false, overflow);
 608   __ bind(done);
 609 }
 610 
 611 void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
 612   __ mv(c_rarg1, zr);
 613   __ call_VM(noreg,
 614              CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), c_rarg1);
 615   __ j(do_continue);
 616 }
 617 
 618 // See if we've got enough room on the stack for locals plus overhead
 619 // below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
 620 // without going through the signal handler, i.e., reserved and yellow zones
 621 // will not be made usable. The shadow zone must suffice to handle the
 622 // overflow.
 623 // The expression stack grows down incrementally, so the normal guard
 624 // page mechanism will work for that.
 625 //
// NOTE: Since the additional locals are also always pushed (this was not
// obvious in generate_method_entry), the guard should work for them
// too.
 629 //
 630 // Args:
 631 //      x13: number of additional locals this frame needs (what we must check)
 632 //      xmethod: Method*
 633 //
 634 // Kills:
 635 //      x10
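//
// In C-like pseudocode the check below is roughly (an illustrative sketch,
// not the generated code):
//
//   if (n_extra_locals <= (page_size - overhead_size) / Interpreter::stackElementSize)
//     return;   // frame fits within one page; the guard page suffices
//   uintptr_t needed = overhead_size + n_extra_locals * Interpreter::stackElementSize;
//   if (sp > thread->stack_overflow_limit() + needed)
//     return;   // enough stack remaining
//   // otherwise: unwind the args and jump to the shared StackOverflowError stub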
 636 void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
 637 
  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_riscv.hpp
 640   const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
 641 
 642   // total overhead size: entry_size + (saved fp through expr stack
 643   // bottom).  be sure to change this if you add/subtract anything
 644   // to/from the overhead area
 645   const int overhead_size =
 646     -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
 647 
 648   const int page_size = (int)os::vm_page_size();
 649 
 650   Label after_frame_check;
 651 
 652   // see if the frame is greater than one page in size. If so,
 653   // then we need to verify there is enough stack space remaining
 654   // for the additional locals.
 655   __ mv(t0, (page_size - overhead_size) / Interpreter::stackElementSize);
 656   __ bleu(x13, t0, after_frame_check);
 657 
 658   // compute sp as if this were going to be the last frame on
 659   // the stack before the red zone
 660 
 661   // locals + overhead, in bytes
 662   __ mv(x10, overhead_size);
 663   __ shadd(x10, x13, x10, t0, Interpreter::logStackElementSize);  // 2 slots per parameter.
 664 
 665   const Address stack_limit(xthread, JavaThread::stack_overflow_limit_offset());
 666   __ ld(t0, stack_limit);
 667 
 668 #ifdef ASSERT
 669   Label limit_okay;
 670   // Verify that thread stack limit is non-zero.
 671   __ bnez(t0, limit_okay);
 672   __ stop("stack overflow limit is zero");
 673   __ bind(limit_okay);
 674 #endif
 675 
 676   // Add stack limit to locals.
 677   __ add(x10, x10, t0);
 678 
 679   // Check against the current stack bottom.
 680   __ bgtu(sp, x10, after_frame_check);
 681 
 682   // Remove the incoming args, peeling the machine SP back to where it
 683   // was in the caller.  This is not strictly necessary, but unless we
 684   // do so the stack frame may have a garbage FP; this ensures a
 685   // correct call stack that we can always unwind.  The ANDI should be
 686   // unnecessary because the sender SP in x19 is always aligned, but
 687   // it doesn't hurt.
 688   __ andi(sp, x19_sender_sp, -16);
 689 
 690   // Note: the restored frame is not necessarily interpreted.
 691   // Use the shared runtime version of the StackOverflowError.
 692   assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
 693   __ far_jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
 694 
 695   // all done with frame size check
 696   __ bind(after_frame_check);
 697 }
 698 
 699 // Allocate monitor and lock method (asm interpreter)
 700 //
 701 // Args:
 702 //      xmethod: Method*
 703 //      xlocals: locals
 704 //
 705 // Kills:
 706 //      x10
 707 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
 708 //      t0, t1 (temporary regs)
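//
// For reference, each monitor entry allocated below is a BasicObjectLock,
// roughly (see runtime/basicLock.hpp):
//
//   class BasicObjectLock {
//     BasicLock _lock;  // displaced header / lock metadata
//     oop       _obj;   // the object being locked
//   };
//
// entry_size is the size of this pair, and the object is stored through
// BasicObjectLock::obj_offset() below.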
 709 void TemplateInterpreterGenerator::lock_method() {
 710   // synchronize method
 711   const Address access_flags(xmethod, Method::access_flags_offset());
 712   const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
 713   const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
 714 
 715 #ifdef ASSERT
 716   __ load_unsigned_short(x10, access_flags);
 717   __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method doesn't need synchronization", false);
 718 #endif // ASSERT
 719 
 720   // get synchronization object
 721   {
 722     Label done;
 723     __ load_unsigned_short(x10, access_flags);
 724     __ andi(t0, x10, JVM_ACC_STATIC);
 725     // get receiver (assume this is frequent case)
 726     __ ld(x10, Address(xlocals, Interpreter::local_offset_in_bytes(0)));
 727     __ beqz(t0, done);
 728     __ load_mirror(x10, xmethod, x15, t1);
 729 
 730 #ifdef ASSERT
 731     {
 732       Label L;
 733       __ bnez(x10, L);
 734       __ stop("synchronization object is null");
 735       __ bind(L);
 736     }
 737 #endif // ASSERT
 738 
 739     __ bind(done);
 740   }
 741 
 742   // add space for monitor & lock
 743   __ check_extended_sp();
 744   __ sub(sp, sp, entry_size); // add space for a monitor entry
 745   __ sub(esp, esp, entry_size);
 746   __ sub(t0, sp, fp);
 747   __ srai(t0, t0, Interpreter::logStackElementSize);
 748   __ sd(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
 749   __ sub(t0, esp, fp);
 750   __ srai(t0, t0, Interpreter::logStackElementSize);
 751   __ sd(t0, monitor_block_top);  // set new monitor block top
 752   // store object
 753   __ sd(x10, Address(esp, BasicObjectLock::obj_offset()));
 754   __ mv(c_rarg1, esp); // object address
 755   __ lock_object(c_rarg1);
 756 }
 757 
// Generate a fixed interpreter frame. The setup is identical for
// interpreted methods and for native methods, hence the shared code.
 760 //
 761 // Args:
 762 //      ra: return address
 763 //      xmethod: Method*
 764 //      xlocals: pointer to locals
 765 //      xcpool: cp cache
 766 //      stack_pointer: previous sp
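//
// A sketch of the fixed frame built below, derived from the stores in this
// function (word offsets from the post-adjustment sp; the native case adds
// two zeroed slots at offsets 12 and 13):
//
//   sp + 11 : saved ra
//   sp + 10 : saved fp              (fp itself is set to sp + 12)
//   sp +  9 : sender sp
//   sp +  8 : last_sp (null)
//   sp +  7 : Method*
//   sp +  6 : mdp (or null)
//   sp +  5 : extended sp (relativized)
//   sp +  4 : mirror
//   sp +  3 : constant pool cache
//   sp +  2 : locals (relativized)
//   sp +  1 : bcp
//   sp +  0 : monitor block top (relativized initial sp)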
 767 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
 768   // initialize fixed part of activation frame
 769   if (native_call) {
 770     __ subi(esp, sp, 14 * wordSize);
 771     __ mv(xbcp, zr);
 772     __ subi(sp, sp, 14 * wordSize);
 773     // add 2 zero-initialized slots for native calls
 774     __ sd(zr, Address(sp, 13 * wordSize));
 775     __ sd(zr, Address(sp, 12 * wordSize));
 776   } else {
 777     __ subi(esp, sp, 12 * wordSize);
 778     __ ld(t0, Address(xmethod, Method::const_offset()));     // get ConstMethod
 779     __ add(xbcp, t0, in_bytes(ConstMethod::codes_offset())); // get codebase
 780     __ subi(sp, sp, 12 * wordSize);
 781   }
 782   __ sd(xbcp, Address(sp, wordSize));
 783   __ mv(t0, frame::interpreter_frame_initial_sp_offset);
 784   __ sd(t0, Address(sp, 0));
 785 
 786   if (ProfileInterpreter) {
 787     Label method_data_continue;
 788     __ ld(t0, Address(xmethod, Method::method_data_offset()));
 789     __ beqz(t0, method_data_continue);
 790     __ la(t0, Address(t0, in_bytes(MethodData::data_offset())));
 791     __ bind(method_data_continue);
 792   }
 793 
 794   __ sd(xmethod, Address(sp, 7 * wordSize));
 795   __ sd(ProfileInterpreter ? t0 : zr, Address(sp, 6 * wordSize));
 796 
 797   __ sd(ra, Address(sp, 11 * wordSize));
 798   __ sd(fp, Address(sp, 10 * wordSize));
 799   __ la(fp, Address(sp, 12 * wordSize)); // include ra & fp
 800 
 801   __ ld(xcpool, Address(xmethod, Method::const_offset()));
 802   __ ld(xcpool, Address(xcpool, ConstMethod::constants_offset()));
 803   __ ld(xcpool, Address(xcpool, ConstantPool::cache_offset()));
 804   __ sd(xcpool, Address(sp, 3 * wordSize));
 805   __ sub(t0, xlocals, fp);
  __ srai(t0, t0, Interpreter::logStackElementSize);   // t0 = (xlocals - fp) >> logStackElementSize
 807   // Store relativized xlocals, see frame::interpreter_frame_locals().
 808   __ sd(t0, Address(sp, 2 * wordSize));
 809 
 810   // set sender sp
 811   // leave last_sp as null
 812   __ sd(x19_sender_sp, Address(sp, 9 * wordSize));
 813   __ sd(zr, Address(sp, 8 * wordSize));
 814 
 815   // Get mirror and store it in the frame as GC root for this Method*
 816   __ load_mirror(t2, xmethod, x15, t1);
 817   __ sd(t2, Address(sp, 4 * wordSize));
 818 
 819   if (!native_call) {
 820     __ ld(t0, Address(xmethod, Method::const_offset()));
 821     __ lhu(t0, Address(t0, ConstMethod::max_stack_offset()));
 822     __ add(t0, t0, MAX2(3, Method::extra_stack_entries()));
 823     __ slli(t0, t0, 3);
 824     __ sub(t0, sp, t0);
 825     __ andi(t0, t0, -16);
 826     __ sub(t1, t0, fp);
 827     __ srai(t1, t1, Interpreter::logStackElementSize);
 828     // Store extended SP
 829     __ sd(t1, Address(sp, 5 * wordSize));
 830     // Move SP out of the way
 831     __ mv(sp, t0);
 832   } else {
 833     // Make sure there is room for the exception oop pushed in case method throws
 834     // an exception (see TemplateInterpreterGenerator::generate_throw_exception())
 835     __ subi(t0, sp, 2 * wordSize);
 836     __ sub(t1, t0, fp);
 837     __ srai(t1, t1, Interpreter::logStackElementSize);
 838     __ sd(t1, Address(sp, 5 * wordSize));
 839     __ mv(sp, t0);
 840   }
 841 }
 842 
 843 // End of helpers
 844 
 845 // Various method entries
 846 //------------------------------------------------------------------------------------------------------------------------
 847 //
 848 //
 849 
 850 // Method entry for java.lang.ref.Reference.get.
 851 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
 852   // Code: _aload_0, _getfield, _areturn
 853   // parameter size = 1
 854   //
 855   // The code that gets generated by this routine is split into 2 parts:
 856   //    1. The "intrinsified" code for G1 (or any SATB based GC),
 857   //    2. The slow path - which is an expansion of the regular method entry.
 858   //
  // Notes:
 860   // * In the G1 code we do not check whether we need to block for
 861   //   a safepoint. If G1 is enabled then we must execute the specialized
 862   //   code for Reference.get (except when the Reference object is null)
 863   //   so that we can log the value in the referent field with an SATB
 864   //   update buffer.
 865   //   If the code for the getfield template is modified so that the
 866   //   G1 pre-barrier code is executed when the current method is
 867   //   Reference.get() then going through the normal method entry
 868   //   will be fine.
 869   // * The G1 code can, however, check the receiver object (the instance
 870   //   of java.lang.Reference) and jump to the slow path if null. If the
 871   //   Reference object is null then we obviously cannot fetch the referent
 872   //   and so we don't need to call the G1 pre-barrier. Thus we can use the
 873   //   regular method entry code to generate the NPE.
 874   //
 875   // This code is based on generate_accessor_entry.
 876   //
 877   // xmethod: Method*
  // x19_sender_sp: sender SP (must be preserved for the slow path, set SP to it on the fast path)
 879 
 880   // ra is live.  It must be saved around calls.
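
  // A hedged sketch of the fast path generated below, in Java-like
  // pseudocode (illustration only, not actual library code):
  //
  //   Object get() {
  //     if (this == null) goto slow_path;   // receiver null check
  //     return this.referent;               // loaded via IN_HEAP | ON_WEAK_OOP_REF
  //   }                                     // so SATB collectors log the referent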
 881 
 882   address entry = __ pc();
 883 
 884   const int referent_offset = java_lang_ref_Reference::referent_offset();
 885   guarantee(referent_offset > 0, "referent offset not initialized");
 886 
 887   Label slow_path;
 888   const Register local_0 = c_rarg0;
  // Check whether local 0 (the receiver) is null.
  // If it is, it is OK to jump to the slow path.
 891   __ ld(local_0, Address(esp, 0));
 892   __ beqz(local_0, slow_path);
 893 
 894   // Load the value of the referent field.
 895   const Address field_address(local_0, referent_offset);
 896   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
 897   bs->load_at(_masm, IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ t0, /*tmp2*/ t1);
 898 
 899   // areturn
 900   __ andi(sp, x19_sender_sp, -16);  // done with stack
 901   __ ret();
 902 
 903   // generate a vanilla interpreter entry as the slow path
 904   __ bind(slow_path);
 905   __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
 906   return entry;
 907 }
 908 
 909 /**
 910  * Method entry for static native methods:
 911  *   int java.util.zip.CRC32.update(int crc, int b)
 912  */
 913 address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
 914   // TODO: Unimplemented generate_CRC32_update_entry
 915   return nullptr;
 916 }
 917 
 918 /**
 919  * Method entry for static native methods:
 920  *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 921  *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 922  */
 923 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
 924   // TODO: Unimplemented generate_CRC32_updateBytes_entry
 925   return nullptr;
 926 }
 927 
 928 /**
 929  * Method entry for intrinsic-candidate (non-native) methods:
 930  *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 931  *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native.
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
 934  */
 935 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
 936   // TODO: Unimplemented generate_CRC32C_updateBytes_entry
 937   return nullptr;
 938 }
 939 
 940 // Not supported
 941 address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
 942 address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }
 943 
 944 void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
 945   // See more discussion in stackOverflow.hpp.
 946 
 947   const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
 948   const int page_size = (int)os::vm_page_size();
 949   const int n_shadow_pages = shadow_zone_size / page_size;
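
  // For example (illustrative numbers only): with a 4 KiB page size and a
  // 64 KiB shadow zone, n_shadow_pages is 16 and the loop below touches one
  // word in each page, roughly at sp - 4K, sp - 8K, ..., sp - 64K.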
 950 
 951 #ifdef ASSERT
 952   Label L_good_limit;
 953   __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
 954   __ bnez(t0, L_good_limit);
 955   __ stop("shadow zone safe limit is not initialized");
 956   __ bind(L_good_limit);
 957 
 958   Label L_good_watermark;
 959   __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
 960   __ bnez(t0, L_good_watermark);
 961   __ stop("shadow zone growth watermark is not initialized");
 962   __ bind(L_good_watermark);
 963 #endif
 964 
 965   Label L_done;
 966 
 967   __ ld(t0, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
 968   __ bgtu(sp, t0, L_done);
 969 
 970   for (int p = 1; p <= n_shadow_pages; p++) {
 971     __ bang_stack_with_offset(p * page_size);
 972   }
 973 
 974   // Record the new watermark, but only if the update is above the safe limit.
 975   // Otherwise, the next time around the check above would pass the safe limit.
 976   __ ld(t0, Address(xthread, JavaThread::shadow_zone_safe_limit()));
 977   __ bleu(sp, t0, L_done);
 978   __ sd(sp, Address(xthread, JavaThread::shadow_zone_growth_watermark()));
 979 
 980   __ bind(L_done);
 981 }
 982 
 983 // Interpreter stub for calling a native method. (asm interpreter)
 984 // This sets up a somewhat different looking stack for calling the
 985 // native method than the typical interpreter frame setup.
 986 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
 987   // determine code generation flags
 988   bool inc_counter = UseCompiler || CountCompiledCalls;
 989 
  // xmethod: Method*
  // x19_sender_sp: sender sp
 992 
 993   address entry_point = __ pc();
 994 
 995   const Address constMethod       (xmethod, Method::const_offset());
 996   const Address access_flags      (xmethod, Method::access_flags_offset());
 997   const Address size_of_parameters(x12, ConstMethod::
 998                                    size_of_parameters_offset());
 999 
1000   // get parameter size (always needed)
1001   __ ld(x12, constMethod);
1002   __ load_unsigned_short(x12, size_of_parameters);
1003 
1004   // Native calls don't need the stack size check since they have no
1005   // expression stack and the arguments are already on the stack and
1006   // we only add a handful of words to the stack.
1007 
1008   // xmethod: Method*
1009   // x12: size of parameters
  // x19_sender_sp: sender sp
1011 
1012   // for natives the size of locals is zero
1013 
1014   // compute beginning of parameters (xlocals)
1015   __ shadd(xlocals, x12, esp, xlocals, 3);
1016   __ subi(xlocals, xlocals, wordSize);
1017 
1018   // Pull SP back to minimum size: this avoids holes in the stack
1019   __ andi(sp, esp, -16);
1020 
1021   // initialize fixed part of activation frame
1022   generate_fixed_frame(true);
1023 
1024   // make sure method is native & not abstract
1025 #ifdef ASSERT
1026   __ load_unsigned_short(x10, access_flags);
1027   __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute non-native method as native", false);
1028   __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
1029 #endif
1030 
1031   // Since at this point in the method invocation the exception
1032   // handler would try to exit the monitor of synchronized methods
1033   // which hasn't been entered yet, we set the thread local variable
1034   // _do_not_unlock_if_synchronized to true. The remove_activation
1035   // will check this flag.
1036 
1037   const Address do_not_unlock_if_synchronized(xthread,
1038                                               in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1039   __ mv(t1, true);
1040   __ sb(t1, do_not_unlock_if_synchronized);
1041 
1042   // increment invocation count & check for overflow
1043   Label invocation_counter_overflow;
1044   if (inc_counter) {
1045     generate_counter_incr(&invocation_counter_overflow);
1046   }
1047 
1048   Label continue_after_compile;
1049   __ bind(continue_after_compile);
1050 
1051   bang_stack_shadow_pages(true);
1052 
1053   // reset the _do_not_unlock_if_synchronized flag
1054   __ sb(zr, do_not_unlock_if_synchronized);
1055 
1056   // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if counters overflow.
1059   if (synchronized) {
1060     lock_method();
1061   } else {
1062     // no synchronization necessary
1063 #ifdef ASSERT
1064     __ load_unsigned_short(x10, access_flags);
1065     __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
1066 #endif
1067   }
1068 
1069   // start execution
1070 #ifdef ASSERT
1071   __ verify_frame_setup();
1072 #endif
1073 
1074   // jvmti support
1075   __ notify_method_entry();
1076 
1077   // work registers
1078   const Register t = x18;
1079   const Register result_handler = x19;
1080 
1081   // allocate space for parameters
1082   __ ld(t, Address(xmethod, Method::const_offset()));
1083   __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
1084 
1085   __ slli(t, t, Interpreter::logStackElementSize);
1086   __ sub(x30, esp, t);
1087   __ andi(sp, x30, -16);
1088   __ mv(esp, x30);
1089 
1090   // get signature handler
1091   {
1092     Label L;
1093     __ ld(t, Address(xmethod, Method::signature_handler_offset()));
1094     __ bnez(t, L);
1095     __ call_VM(noreg,
1096                CAST_FROM_FN_PTR(address,
1097                                 InterpreterRuntime::prepare_native_call),
1098                xmethod);
1099     __ ld(t, Address(xmethod, Method::signature_handler_offset()));
1100     __ bind(L);
1101   }
1102 
1103   // call signature handler
1104   assert(InterpreterRuntime::SignatureHandlerGenerator::from() == xlocals,
1105          "adjust this code");
1106   assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
1107          "adjust this code");
1108   assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t0,
1109          "adjust this code");
1110 
1111   // The generated handlers do not touch xmethod (the method).
1112   // However, large signatures cannot be cached and are generated
1113   // each time here.  The slow-path generator can do a GC on return,
1114   // so we must reload it after the call.
1115   __ jalr(t);
1116   __ get_method(xmethod);        // slow path can do a GC, reload xmethod
1117 
1118 
1119   // result handler is in x10
1120   // set result handler
1121   __ mv(result_handler, x10);
1122   // Save it in the frame in case of preemption; we cannot rely on callee saved registers.
1123   __ sd(x10, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));
1124 
1125   // pass mirror handle if static call
1126   {
1127     Label L;
1128     __ load_unsigned_short(t, Address(xmethod, Method::access_flags_offset()));
1129     __ test_bit(t0, t, exact_log2(JVM_ACC_STATIC));
1130     __ beqz(t0, L);
1131     // get mirror
1132     __ load_mirror(t, xmethod, x28, t1);
1133     // copy mirror into activation frame
1134     __ sd(t, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
1135     // pass handle to mirror
1136     __ addi(c_rarg1, fp, frame::interpreter_frame_oop_temp_offset * wordSize);
1137     __ bind(L);
1138   }
1139 
1140   // get native function entry point in x28
1141   {
1142     Label L;
1143     __ ld(x28, Address(xmethod, Method::native_function_offset()));
1144     ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
1145     __ la(t, unsatisfied);
1146     __ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8
1147 
1148     __ bne(x28, t1, L);
1149     __ call_VM(noreg,
1150                CAST_FROM_FN_PTR(address,
1151                                 InterpreterRuntime::prepare_native_call),
1152                xmethod);
1153     __ get_method(xmethod);
1154     __ ld(x28, Address(xmethod, Method::native_function_offset()));
1155     __ bind(L);
1156   }
1157 
1158   // pass JNIEnv
1159   __ add(c_rarg0, xthread, in_bytes(JavaThread::jni_environment_offset()));
1160 
1161   // It is enough that the pc() points into the right code
1162   // segment. It does not have to be the correct return pc.
1163   // For convenience we use the pc we want to resume to in
1164   // case of preemption on Object.wait.
1165   Label native_return;
1166   __ set_last_Java_frame(esp, fp, native_return, x30);
1167 
1168   // change thread state
1169 #ifdef ASSERT
1170   {
1171     Label L;
1172     __ lwu(t, Address(xthread, JavaThread::thread_state_offset()));
1173     __ mv(t0, (u1)_thread_in_Java);
1174     __ beq(t, t0, L);
1175     __ stop("Wrong thread state in native stub");
1176     __ bind(L);
1177   }
1178 #endif
1179 
1180   // Change state to native
1181   __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1182   __ mv(t0, _thread_in_native);
1183   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1184   __ sw(t0, Address(t1));
1185 
1186   __ push_cont_fastpath();
1187 
1188   // Call the native method.
1189   __ jalr(x28);
1190 
1191   __ pop_cont_fastpath();
1192 
1193   __ get_method(xmethod);
1194   // result potentially in x10 or f10
1195 
1196   // Restore cpu control state after JNI call
1197   __ restore_cpu_control_state_after_jni(t0);
1198 
1199   // make room for the pushes we're about to do
1200   __ subi(t0, esp, 4 * wordSize);
1201   __ andi(sp, t0, -16);
1202 
  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
1207   __ push(dtos);
1208   __ push(ltos);
1209 
1210   // change thread state
1211   // Force all preceding writes to be observed prior to thread state change
1212   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1213 
1214   __ mv(t0, _thread_in_native_trans);
1215   __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
1216 
1217   // Force this write out before the read below
1218   if (!UseSystemMemoryBarrier) {
1219     __ membar(MacroAssembler::AnyAny);
1220   }
1221 
1222   // check for safepoint operation in progress and/or pending suspend requests
1223   {
1224     Label L, Continue;
1225 
1226     // We need an acquire here to ensure that any subsequent load of the
1227     // global SafepointSynchronize::_state flag is ordered after this load
1228     // of the thread-local polling word. We don't want this poll to
1229     // return false (i.e. not safepointing) and a later poll of the global
1230     // SafepointSynchronize::_state spuriously to return true.
1231     //
1232     // This is to avoid a race when we're in a native->Java transition
1233     // racing the code which wakes up from a safepoint.
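    //
    // Schematically, with acquire on the poll (a sketch; the names are
    // illustrative, not actual HotSpot API):
    //
    //   w = load_acquire(thread->polling_word());   // the poll below
    //   s = load(SafepointSynchronize::_state);     // any later global check
    //   // acquire keeps the load of s from floating above the load of w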
1234     __ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */);
1235     __ lwu(t1, Address(xthread, JavaThread::suspend_flags_offset()));
1236     __ beqz(t1, Continue);
1237     __ bind(L);
1238 
1239     // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here, preventing us from
1241     // clearing _last_native_pc down below. So we do a runtime call by
1242     // hand.
1243     //
1244     __ mv(c_rarg0, xthread);
1245     __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
1246     __ get_method(xmethod);
1247     __ reinit_heapbase();
1248     __ bind(Continue);
1249   }
1250 
1251   // change thread state
1252   // Force all preceding writes to be observed prior to thread state change
1253   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1254 
1255   __ mv(t0, _thread_in_Java);
1256   __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
1257 
1258   if (LockingMode != LM_LEGACY) {
1259     // Check preemption for Object.wait()
1260     Label not_preempted;
1261     __ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1262     __ beqz(t1, not_preempted);
1263     __ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1264     __ jr(t1);
1265     __ bind(native_return);
1266     __ restore_after_resume(true /* is_native */);
1267     // reload result_handler
1268     __ ld(result_handler, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));
1269     __ bind(not_preempted);
1270   } else {
    // any pc will do, so just use this one for LM_LEGACY to keep the code together.
1272     __ bind(native_return);
1273   }
1274 
1275   // reset_last_Java_frame
1276   __ reset_last_Java_frame(true);
1277 
1278   if (CheckJNICalls) {
1279     // clear_pending_jni_exception_check
1280     __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
1281   }
1282 
1283   // reset handle block
1284   __ ld(t, Address(xthread, JavaThread::active_handles_offset()));
1285   __ sd(zr, Address(t, JNIHandleBlock::top_offset()));
1286 
  // If the result is an oop, unbox it and store it in the frame where the
  // GC will see it; the result handler will pick it up.
1289 
1290   {
1291     Label no_oop;
1292     __ la(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
1293     __ bne(t, result_handler, no_oop);
1294     // Unbox oop result, e.g. JNIHandles::resolve result.
1295     __ pop(ltos);
1296     __ resolve_jobject(x10, t, t1);
1297     __ sd(x10, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // keep the stack depth as expected by pushing the oop, which will eventually be discarded
1299     __ push(ltos);
1300     __ bind(no_oop);
1301   }
1302 
1303   {
1304     Label no_reguard;
1305     __ lwu(t0, Address(xthread, in_bytes(JavaThread::stack_guard_state_offset())));
1306     __ mv(t1, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
1307     __ bne(t0, t1, no_reguard);
1308 
1309     __ push_call_clobbered_registers();
1310     __ mv(c_rarg0, xthread);
1311     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
1312     __ pop_call_clobbered_registers();
1313     __ bind(no_reguard);
1314   }
1315 
  // The method register is junk from after the thread_in_native transition
  // until here.  Also we can't call_VM until the bcp has been
  // restored.  We need bcp for throwing the exception below, so get it now.
1319   __ get_method(xmethod);
1320 
1321   // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
1322   // xbcp == code_base()
1323   __ ld(xbcp, Address(xmethod, Method::const_offset()));   // get ConstMethod*
1324   __ add(xbcp, xbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
1325   // handle exceptions (exception handling will handle unlocking!)
1326   {
1327     Label L;
1328     __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
1329     __ beqz(t0, L);
1330     // Note: At some point we may want to unify this with the code
1331     // used in call_VM_base(); i.e., we should use the
1332     // StubRoutines::forward_exception code. For now this doesn't work
1333     // here because the sp is not correctly set at this point.
1334     __ MacroAssembler::call_VM(noreg,
1335                                CAST_FROM_FN_PTR(address,
1336                                InterpreterRuntime::throw_pending_exception));
1337     __ should_not_reach_here();
1338     __ bind(L);
1339   }
1340 
1341   // do unlocking if necessary
1342   {
1343     Label L;
1344     __ load_unsigned_short(t, Address(xmethod, Method::access_flags_offset()));
1345     __ test_bit(t0, t, exact_log2(JVM_ACC_SYNCHRONIZED));
1346     __ beqz(t0, L);
1347     // the code below should be shared with interpreter macro
1348     // assembler implementation
1349     {
1350       Label unlock;
1351       // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, we need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
1354 
      // the monitor is expected in c_rarg1 for the slow unlock path
1356       __ la(c_rarg1, Address(fp,   // address of first monitor
1357                              (intptr_t)(frame::interpreter_frame_initial_sp_offset *
1358                                         wordSize - sizeof(BasicObjectLock))));
1359 
1360       __ ld(t, Address(c_rarg1, BasicObjectLock::obj_offset()));
1361       __ bnez(t, unlock);
1362 
1363       // Entry already unlocked, need to throw exception
1364       __ MacroAssembler::call_VM(noreg,
1365                                  CAST_FROM_FN_PTR(address,
1366                                                   InterpreterRuntime::throw_illegal_monitor_state_exception));
1367       __ should_not_reach_here();
1368 
1369       __ bind(unlock);
1370       __ unlock_object(c_rarg1);
1371     }
1372     __ bind(L);
1373   }
1374 
1375   // jvmti support
1376   // Note: This must happen _after_ handling/throwing any exceptions since
1377   //       the exception handler code notifies the runtime of method exits
1378   //       too. If this happens before, method entry/exit notifications are
1379   //       not properly paired (was bug - gri 11/22/99).
1380   __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1381 
1382   __ pop(ltos);
1383   __ pop(dtos);
1384 
1385   __ jalr(result_handler);
1386 
1387   // remove activation
1388   __ ld(esp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1389   // remove frame anchor
1390   __ leave();
1391 
1392   // restore sender sp
1393   __ mv(sp, esp);
1394 
1395   __ ret();
1396 
1397   if (inc_counter) {
1398     // Handle overflow of counter and compile method
1399     __ bind(invocation_counter_overflow);
1400     generate_counter_overflow(continue_after_compile);
1401   }
1402 
1403   return entry_point;
1404 }
1405 
1406 //
1407 // Generic interpreted method entry to (asm) interpreter
1408 //
1409 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1410 
1411   // determine code generation flags
1412   const bool inc_counter  = UseCompiler || CountCompiledCalls;
1413 
  // x19_sender_sp: sender sp
1415   address entry_point = __ pc();
1416 
1417   const Address constMethod(xmethod, Method::const_offset());
1418   const Address access_flags(xmethod, Method::access_flags_offset());
1419   const Address size_of_parameters(x13,
1420                                    ConstMethod::size_of_parameters_offset());
1421   const Address size_of_locals(x13, ConstMethod::size_of_locals_offset());
1422 
1423   // get parameter size (always needed)
1424   // need to load the const method first
1425   __ ld(x13, constMethod);
1426   __ load_unsigned_short(x12, size_of_parameters);
1427 
1428   // x12: size of parameters
1429 
1430   __ load_unsigned_short(x13, size_of_locals); // get size of locals in words
1431   __ sub(x13, x13, x12); // x13 = no. of additional locals
1432 
1433   // see if we've got enough room on the stack for locals plus overhead.
1434   generate_stack_overflow_check();
1435 
1436   // compute beginning of parameters (xlocals)
1437   __ shadd(xlocals, x12, esp, t1, 3);
1438   __ subi(xlocals, xlocals, wordSize);
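  // xlocals = esp + size_of_parameters * wordSize - wordSize: local slot 0
  // (the first parameter) sits at the highest address, and the remaining
  // locals are addressed downwards from it.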
1439 
1440   // Make room for additional locals
1441   __ slli(t1, x13, 3);
1442   __ sub(t0, esp, t1);
1443 
1444   // Padding between locals and fixed part of activation frame to ensure
1445   // SP is always 16-byte aligned.
1446   __ andi(sp, t0, -16);
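  // (andi with -16 clears the low four bits, rounding sp down to the
  // 16-byte boundary required by the ABI)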
1447 
1448   // x13 - # of additional locals
1449   // allocate space for locals
1450   // explicitly initialize locals
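  // (so the GC never sees stale stack data in slots it may later scan
  // for oops)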
1451   {
1452     Label exit, loop;
1453     __ blez(x13, exit); // do nothing if x13 <= 0
1454     __ bind(loop);
1455     __ sd(zr, Address(t0));
1456     __ addi(t0, t0, wordSize);
1457     __ subi(x13, x13, 1); // until everything initialized
1458     __ bnez(x13, loop);
1459     __ bind(exit);
1460   }
1461 
  // Load the base of the dispatch table into xdispatch
1463   __ get_dispatch();
1464 
1465   // initialize fixed part of activation frame
1466   generate_fixed_frame(false);
1467 
1468   // make sure method is not native & not abstract
1469 #ifdef ASSERT
1470   __ load_unsigned_short(x10, access_flags);
1471   __ verify_access_flags(x10, JVM_ACC_NATIVE, "tried to execute native method as non-native");
1472   __ verify_access_flags(x10, JVM_ACC_ABSTRACT, "tried to execute abstract method in interpreter");
1473 #endif
1474 
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation code
  // will check this flag.
1480 
1481   const Address do_not_unlock_if_synchronized(xthread,
1482                                               in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1483   __ mv(t1, true);
1484   __ sb(t1, do_not_unlock_if_synchronized);
1485 
1486   Label no_mdp;
1487   const Register mdp = x13;
1488   __ ld(mdp, Address(xmethod, Method::method_data_offset()));
1489   __ beqz(mdp, no_mdp);
1490   __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
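  // mdp now points at the MethodData's data section (the first ProfileData entry)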
1491   __ profile_parameters_type(mdp, x11, x12, x14); // use x11, x12, x14 as tmp registers
1492   __ bind(no_mdp);
1493 
1494   // increment invocation count & check for overflow
1495   Label invocation_counter_overflow;
1496   if (inc_counter) {
1497     generate_counter_incr(&invocation_counter_overflow);
1498   }
1499 
1500   Label continue_after_compile;
1501   __ bind(continue_after_compile);
1502 
1503   bang_stack_shadow_pages(false);
1504 
1505   // reset the _do_not_unlock_if_synchronized flag
1506   __ sb(zr, do_not_unlock_if_synchronized);
1507 
1508   // check for synchronized methods
  // Must happen AFTER the invocation counter check and the stack overflow
  // check, so the method is not locked if the counter overflows.
1511   if (synchronized) {
1512     // Allocate monitor and lock method
1513     lock_method();
1514   } else {
1515     // no synchronization necessary
1516 #ifdef ASSERT
1517     __ load_unsigned_short(x10, access_flags);
1518     __ verify_access_flags(x10, JVM_ACC_SYNCHRONIZED, "method needs synchronization");
1519 #endif
1520   }
1521 
1522   // start execution
1523 #ifdef ASSERT
1524   __ verify_frame_setup();
1525 #endif
1526 
1527   // jvmti support
1528   __ notify_method_entry();
1529 
1530   __ dispatch_next(vtos);
1531 
1532   // invocation counter overflow
1533   if (inc_counter) {
1534     // Handle overflow of counter and compile method
1535     __ bind(invocation_counter_overflow);
1536     generate_counter_overflow(continue_after_compile);
1537   }
1538 
1539   return entry_point;
1540 }
1541 
1542 // Method entry for java.lang.Thread.currentThread
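// The current java.lang.Thread lives in the JavaThread's _vthread field
// (the mounted virtual thread, or the platform thread when none is
// mounted); it is stored as an OopHandle and must be resolved before
// being returned in x10.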
1543 address TemplateInterpreterGenerator::generate_currentThread() {
1544   address entry_point = __ pc();
1545 
1546   __ ld(x10, Address(xthread, JavaThread::vthread_offset()));
1547   __ resolve_oop_handle(x10, t0, t1);
1548   __ ret();
1549 
1550   return entry_point;
1551 }
1552 
1553 //-----------------------------------------------------------------------------
1554 // Exceptions
1555 
1556 void TemplateInterpreterGenerator::generate_throw_exception() {
1557   // Entry point in previous activation (i.e., if the caller was
1558   // interpreted)
1559   Interpreter::_rethrow_exception_entry = __ pc();
  // Clear interpreter_frame_last_sp; the expression stack is about to be
  // emptied for the exception processing anyway.
1562   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
1563   // x10: exception
1564   // x13: return address/pc that threw exception
1565   __ restore_bcp();    // xbcp points to call/send
1566   __ restore_locals();
1567   __ restore_constant_pool_cache();
1568   __ reinit_heapbase();  // restore xheapbase as heapbase.
1569   __ get_dispatch();
1570 
1571   // Entry point for exceptions thrown within interpreter code
1572   Interpreter::_throw_exception_entry = __ pc();
1573   // If we came here via a NullPointerException on the receiver of a
  // method, xmethod may be corrupt.
1575   __ get_method(xmethod);
1576   // expression stack is undefined here
1577   // x10: exception
1578   // xbcp: exception bcp
1579   __ verify_oop(x10);
1580   __ mv(c_rarg1, x10);
1581 
1582   // expression stack must be empty before entering the VM in case of
1583   // an exception
1584   __ empty_expression_stack();
1585   // find exception handler address and preserve exception oop
1586   __ call_VM(x13,
1587              CAST_FROM_FN_PTR(address,
1588                           InterpreterRuntime::exception_handler_for_exception),
1589              c_rarg1);
1590 
1591   // Restore machine SP
1592   __ restore_sp_after_call();
1593 
1594   // x10: exception handler entry point
1595   // x13: preserved exception oop
1596   // xbcp: bcp for exception handler
1597   __ push_ptr(x13); // push exception which is now the only value on the stack
1598   __ jr(x10); // jump to exception handler (may be _remove_activation_entry!)
1599 
1600   // If the exception is not handled in the current frame the frame is
1601   // removed and the exception is rethrown (i.e. exception
1602   // continuation is _rethrow_exception).
1603   //
  // Note: At this point the bci is still the bci of the instruction
1605   // which caused the exception and the expression stack is
1606   // empty. Thus, for any VM calls at this point, GC will find a legal
1607   // oop map (with empty expression stack).
1608 
1609   //
1610   // JVMTI PopFrame support
1611   //
1612 
1613   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1614   __ empty_expression_stack();
1615   // Set the popframe_processing bit in pending_popframe_condition
1616   // indicating that we are currently handling popframe, so that
1617   // call_VMs that may happen later do not trigger new popframe
1618   // handling cycles.
1619   __ lwu(x13, Address(xthread, JavaThread::popframe_condition_offset()));
1620   __ ori(x13, x13, JavaThread::popframe_processing_bit);
1621   __ sw(x13, Address(xthread, JavaThread::popframe_condition_offset()));
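  // (a plain read-modify-write is safe here: JVMTI PopFrame requires the
  // target thread to be suspended, or to be the current thread, so no
  // other thread can race on this field)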
1622 
1623   {
1624     // Check to see whether we are returning to a deoptimized frame.
1625     // (The PopFrame call ensures that the caller of the popped frame is
1626     // either interpreted or compiled and deoptimizes it if compiled.)
1627     // In this case, we can't call dispatch_next() after the frame is
1628     // popped, but instead must save the incoming arguments and restore
1629     // them after deoptimization has occurred.
1630     //
1631     // Note that we don't compare the return PC against the
1632     // deoptimization blob's unpack entry because of the presence of
1633     // adapter frames in C2.
1634     Label caller_not_deoptimized;
1635     __ ld(c_rarg1, Address(fp, frame::return_addr_offset * wordSize));
1636     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), c_rarg1);
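    // interpreter_contains(pc) returns non-zero when the return address
    // lies in interpreter code, i.e. the caller is interpreted and thus
    // cannot be a deoptimized compiled frame.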
1637     __ bnez(x10, caller_not_deoptimized);
1638 
1639     // Compute size of arguments for saving when returning to
1640     // deoptimized caller
1641     __ get_method(x10);
1642     __ ld(x10, Address(x10, Method::const_offset()));
1643     __ load_unsigned_short(x10, Address(x10, in_bytes(ConstMethod::
1644                                                       size_of_parameters_offset())));
1645     __ slli(x10, x10, Interpreter::logStackElementSize);
1646     __ restore_locals();
1647     __ sub(xlocals, xlocals, x10);
1648     __ addi(xlocals, xlocals, wordSize);
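    // xlocals pointed at local slot 0 (the highest-addressed parameter), so
    // xlocals - x10 + wordSize is the lowest-addressed parameter slot, the
    // start of the x10-byte argument block to preserve.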
1649     // Save these arguments
1650     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1651                                            Deoptimization::
1652                                            popframe_preserve_args),
1653                           xthread, x10, xlocals);
1654 
1655     __ remove_activation(vtos,
1656                          /* throw_monitor_exception */ false,
1657                          /* install_monitor_exception */ false,
1658                          /* notify_jvmdi */ false);
1659 
1660     // Inform deoptimization that it is responsible for restoring
1661     // these arguments
1662     __ mv(t0, JavaThread::popframe_force_deopt_reexecution_bit);
1663     __ sw(t0, Address(xthread, JavaThread::popframe_condition_offset()));
1664 
1665     // Continue in deoptimization handler
1666     __ ret();
1667 
1668     __ bind(caller_not_deoptimized);
1669   }
1670 
1671   __ remove_activation(vtos,
1672                        /* throw_monitor_exception */ false,
1673                        /* install_monitor_exception */ false,
1674                        /* notify_jvmdi */ false);
1675 
1676   // Restore the last_sp and null it out
1677   __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, LogBytesPerWord);
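  // (last_sp is stored as a word count relative to fp, hence
  // esp = fp + last_sp * wordSize)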
1679   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
1680 
1681   __ restore_bcp();
1682   __ restore_locals();
1683   __ restore_constant_pool_cache();
1684   __ get_method(xmethod);
1685   __ get_dispatch();
1686 
1687   // The method data pointer was incremented already during
1688   // call profiling. We have to restore the mdp for the current bcp.
1689   if (ProfileInterpreter) {
1690     __ set_method_data_pointer_for_bcp();
1691   }
1692 
1693   // Clear the popframe condition flag
1694   __ sw(zr, Address(xthread, JavaThread::popframe_condition_offset()));
1695   assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
1696 
1697 #if INCLUDE_JVMTI
1698   {
1699     Label L_done;
1700 
1701     __ lbu(t0, Address(xbcp, 0));
1702     __ mv(t1, Bytecodes::_invokestatic);
1703     __ bne(t1, t0, L_done);
1704 
    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or null.

    __ ld(c_rarg0, Address(xlocals, 0));
    __ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, xmethod, xbcp);
1710 
1711     __ beqz(x10, L_done);
1712 
1713     __ sd(x10, Address(esp, 0));
1714     __ bind(L_done);
1715   }
1716 #endif // INCLUDE_JVMTI
1717 
1718   // Restore machine SP
1719   __ restore_sp_after_call();
1720 
1721   __ dispatch_next(vtos);
1722   // end of PopFrame support
1723 
1724   Interpreter::_remove_activation_entry = __ pc();
1725 
1726   // preserve exception over this code sequence
1727   __ pop_ptr(x10);
1728   __ sd(x10, Address(xthread, JavaThread::vm_result_offset()));
1729   // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ true,
                       /* notify_jvmdi */ false);
1731   // restore exception
1732   __ get_vm_result(x10, xthread);
1733 
  // In between activations: the type of the previous activation is not
  // yet known. Compute the continuation point; it expects the following
  // registers to be set up:
1737   //
1738   // x10: exception
1739   // ra: return address/pc that threw exception
1740   // sp: expression stack of caller
1741   // fp: fp of caller
1742   // FIXME: There's no point saving ra here because VM calls don't trash it
1743   __ subi(sp, sp, 2 * wordSize);
1744   __ sd(x10, Address(sp, 0));                   // save exception
1745   __ sd(ra, Address(sp, wordSize));             // save return address
1746   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1747                                          SharedRuntime::exception_handler_for_return_address),
1748                         xthread, ra);
1749   __ mv(x11, x10);                              // save exception handler
1750   __ ld(x10, Address(sp, 0));                   // restore exception
1751   __ ld(ra, Address(sp, wordSize));             // restore return address
1752   __ addi(sp, sp, 2 * wordSize);
1753   // We might be returning to a deopt handler that expects x13 to
1754   // contain the exception pc
1755   __ mv(x13, ra);
1756   // Note that an "issuing PC" is actually the next PC after the call
1757   __ jr(x11);                                   // jump to exception
1758                                                 // handler of caller
1759 }
1760 
1761 //
1762 // JVMTI ForceEarlyReturn support
1763 //
1764 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state)  {
1765   address entry = __ pc();
1766 
1767   __ restore_bcp();
1768   __ restore_locals();
1769   __ empty_expression_stack();
1770   __ load_earlyret_value(state);
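  // load_earlyret_value fetches the forced return value from the
  // JvmtiThreadState into the TOS register(s) for the given state.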
1771 
1772   __ ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
1773   Address cond_addr(t0, JvmtiThreadState::earlyret_state_offset());
1774 
1775   // Clear the earlyret state
1776   assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
1777   __ sd(zr, cond_addr);
1778 
1779   __ remove_activation(state,
1780                        false, /* throw_monitor_exception */
1781                        false, /* install_monitor_exception */
1782                        true); /* notify_jvmdi */
1783   __ ret();
1784 
1785   return entry;
1786 }
1787 // end of ForceEarlyReturn support
1788 
1789 //-----------------------------------------------------------------------------
1790 // Helper for vtos entry point generation
1791 
1792 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
1793                                                          address& bep,
1794                                                          address& cep,
1795                                                          address& sep,
1796                                                          address& aep,
1797                                                          address& iep,
1798                                                          address& lep,
1799                                                          address& fep,
1800                                                          address& dep,
1801                                                          address& vep) {
1802   assert(t != nullptr && t->is_valid() && t->tos_in() == vtos, "illegal template");
1803   Label L;
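  // Each non-vtos entry pushes the TOS-cached value onto the expression
  // stack and jumps to L, the shared vtos entry point, so the template
  // is always generated with an empty TOS cache.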
1804   aep = __ pc();     // atos entry point
1805       __ push_ptr();
1806       __ j(L);
1807   fep = __ pc();     // ftos entry point
1808       __ push_f();
1809       __ j(L);
1810   dep = __ pc();     // dtos entry point
1811       __ push_d();
1812       __ j(L);
1813   lep = __ pc();     // ltos entry point
1814       __ push_l();
1815       __ j(L);
1816   bep = cep = sep = iep = __ pc();     // [bcsi]tos entry point
1817       __ push_i();
1818   vep = __ pc();     // vtos entry point
1819   __ bind(L);
1820   generate_and_dispatch(t);
1821 }
1822 
1823 //-----------------------------------------------------------------------------
1824 
1825 // Non-product code
1826 #ifndef PRODUCT
1827 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1828   address entry = __ pc();
1829 
1830   __ push_reg(ra);
1831   __ push(state);
1832   __ push_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
1833   __ mv(c_rarg2, x10);  // Pass itos
1834   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3);
1835   __ pop_reg(RegSet::range(x10, x17) + RegSet::range(x5, x7) + RegSet::range(x28, x31), sp);
1836   __ pop(state);
1837   __ pop_reg(ra);
  __ ret();                                   // return from trace stub
1839 
1840   return entry;
1841 }
1842 
1843 void TemplateInterpreterGenerator::count_bytecode() {
1844   __ mv(x7, (address) &BytecodeCounter::_counter_value);
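  // noreg: the previous counter value is not needed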
1845   __ atomic_add(noreg, 1, x7);
1846 }
1847 
1848 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1849   __ mv(x7, (address) &BytecodeHistogram::_counters[t->bytecode()]);
1850   __ atomic_addw(noreg, 1, x7);
1851 }
1852 
1853 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1854   // Calculate new index for counter:
1855   //   _index = (_index >> log2_number_of_codes) |
1856   //            (bytecode << log2_number_of_codes);
1857   Register index_addr = t1;
1858   Register index = t0;
1859   __ mv(index_addr, (address) &BytecodePairHistogram::_index);
1860   __ lw(index, index_addr);
1861   __ mv(x7, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1862   __ srli(index, index, BytecodePairHistogram::log2_number_of_codes);
1863   __ orrw(index, x7, index);
1864   __ sw(index, index_addr);
1865   // Bump bucket contents:
1866   //   _counters[_index] ++;
1867   Register counter_addr = t1;
1868   __ mv(x7, (address) &BytecodePairHistogram::_counters);
1869   __ shadd(counter_addr, index, x7, counter_addr, LogBytesPerInt);
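  // counter_addr = &_counters[index] (int-sized entries, scaled by LogBytesPerInt)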
1870   __ atomic_addw(noreg, 1, counter_addr);
}
1872 
1873 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1874   // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
1877 
1878   assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated");
1879   __ rt_call(Interpreter::trace_code(t->tos_in()));
1880   __ reinit_heapbase();
1881 }
1882 
1883 void TemplateInterpreterGenerator::stop_interpreter_at() {
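  // Trap (ebreak) once the global bytecode counter reaches
  // -XX:StopInterpreterAt=<n>, giving a native debugger a reproducible
  // stopping point.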
1884   Label L;
1885   __ push_reg(t0);
1886   __ mv(t0, (address) &BytecodeCounter::_counter_value);
1887   __ ld(t0, Address(t0));
1888   __ mv(t1, StopInterpreterAt);
1889   __ bne(t0, t1, L);
1890   __ ebreak();
1891   __ bind(L);
1892   __ pop_reg(t0);
1893 }
1894 
1895 #endif // !PRODUCT