/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/powerOfTwo.hpp"
#include <sys/types.h>

// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with -XX:+PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 200 * 1024;

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

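// Slow-path signature handler: the runtime call below parses the method
// signature and spills the incoming Java arguments into the scratch area;
// this stub then reloads them into the C argument registers.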
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

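  // Align esp down to a 16-byte boundary; c_rarg3 then points one word
  // below the first stack argument (see the stack layout below).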
  __ andr(esp, esp, -16);
  __ mov(c_rarg3, esp);
  // rmethod
  // rlocals
  // c_rarg3: first stack arg - wordSize

  // allocate the 18-word register-argument scratch area described below
  __ sub(sp, c_rarg3, 18 * wordSize);
  __ str(lr, Address(__ pre(sp, -2 * wordSize)));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rmethod, rlocals, c_rarg3);

  // r0: result handler

  // Stack layout:
  // sp:  return address           <- sp
  //      1 garbage
  //      8 integer args (if static first is unused)
  //      1 float/double identifier word
  //      8 double args
  //        stack args              <- esp
  //        garbage
  //        expression stack bottom
  //        bcp (null)
  //        ...

  // Restore LR
  __ ldr(lr, Address(__ post(sp, 2 * wordSize)));

  // Do FP first so we can use c_rarg3 as temp
  __ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

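  // Bit i of the identifier word selects the width of FP argument i:
  // clear means a 32-bit float, set means a 64-bit double.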
  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = as_FloatRegister(i);

    Label d, done;

    __ tbnz(c_rarg3, i, d);
    __ ldrs(r, Address(sp, (10 + i) * wordSize));
    __ b(done);
    __ bind(d);
    __ ldrd(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here.  It will be loaded with the JNIEnv* later.
  __ ldr(c_rarg1, Address(sp, 1 * wordSize));
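  // The remaining integer argument registers are reloaded in pairs; the
  // scratch-area slot index matches the register's encoding (c_rargN is in slot N).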
  for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
    Register rm = as_Register(i), rn = as_Register(i+1);
    __ ldp(rm, rn, Address(sp, i * wordSize));
  }

  __ add(sp, sp, 18 * wordSize);
  __ ret(lr);

  return entry;
}


//
// Various method entries
//

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // rmethod: Method*
  // r19_sender_sp: sender sp
  // esp: args

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we add an intrinsic which is virtually callable,
  // we'll have to worry about how to safepoint so that this code is used.

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack:
  //        [ arg ] <-- esp
  //        [ arg ]
  // retaddr in lr

  address entry_point = nullptr;
  Register continuation = lr;
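  // Leaf intrinsics return directly through lr. The transcendental entries
  // call out to the runtime, which clobbers lr, so they save it in r23 (the
  // first free callee-saved register) and continue through that instead.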
  switch (kind) {
  case Interpreter::java_lang_math_abs:
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ fabsd(v0, v0);
    __ mov(sp, r19_sender_sp); // Restore caller's SP
    break;
  case Interpreter::java_lang_math_sqrt:
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ fsqrtd(v0, v0);
    __ mov(sp, r19_sender_sp);
    break;
  case Interpreter::java_lang_math_sin :
  case Interpreter::java_lang_math_cos :
  case Interpreter::java_lang_math_tan :
  case Interpreter::java_lang_math_log :
  case Interpreter::java_lang_math_log10 :
  case Interpreter::java_lang_math_exp :
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ mov(sp, r19_sender_sp);
    __ mov(r23, lr);
    continuation = r23;  // The first free callee-saved register
    generate_transcendental_entry(kind, 1);
    break;
  case Interpreter::java_lang_math_pow :
    entry_point = __ pc();
    __ mov(r23, lr);
    continuation = r23;
    __ ldrd(v0, Address(esp, 2 * Interpreter::stackElementSize));
    __ ldrd(v1, Address(esp));
    __ mov(sp, r19_sender_sp);
    generate_transcendental_entry(kind, 2);
    break;
  case Interpreter::java_lang_math_fmaD :
    if (UseFMA) {
      entry_point = __ pc();
      __ ldrd(v0, Address(esp, 4 * Interpreter::stackElementSize));
      __ ldrd(v1, Address(esp, 2 * Interpreter::stackElementSize));
      __ ldrd(v2, Address(esp));
      __ fmaddd(v0, v0, v1, v2);
      __ mov(sp, r19_sender_sp); // Restore caller's SP
    }
    break;
  case Interpreter::java_lang_math_fmaF :
    if (UseFMA) {
      entry_point = __ pc();
      __ ldrs(v0, Address(esp, 2 * Interpreter::stackElementSize));
      __ ldrs(v1, Address(esp, Interpreter::stackElementSize));
      __ ldrs(v2, Address(esp));
      __ fmadds(v0, v0, v1, v2);
      __ mov(sp, r19_sender_sp); // Restore caller's SP
    }
    break;
  default:
    ; // anything else: entry_point stays null and the normal entry is used
  }
  if (entry_point != nullptr) {
    __ br(continuation);
  }

  return entry_point;
}

// double trigonometric and transcendental functions
// static jdouble dsin(jdouble x);
// static jdouble dcos(jdouble x);
// static jdouble dtan(jdouble x);
// static jdouble dlog(jdouble x);
// static jdouble dlog10(jdouble x);
// static jdouble dexp(jdouble x);
// static jdouble dpow(jdouble x, jdouble y);

void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
  address fn;
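  // Prefer the generated stub for each function when one exists; otherwise
  // fall back to the shared C implementation in SharedRuntime.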
  switch (kind) {
  case Interpreter::java_lang_math_sin :
    if (StubRoutines::dsin() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
    }
    break;
  case Interpreter::java_lang_math_cos :
    if (StubRoutines::dcos() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
    }
    break;
  case Interpreter::java_lang_math_tan :
    if (StubRoutines::dtan() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
    }
    break;
  case Interpreter::java_lang_math_log :
    if (StubRoutines::dlog() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
    }
    break;
  case Interpreter::java_lang_math_log10 :
    if (StubRoutines::dlog10() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
    }
    break;
  case Interpreter::java_lang_math_exp :
    if (StubRoutines::dexp() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
    }
    break;
  case Interpreter::java_lang_math_pow :
    if (StubRoutines::dpow() == nullptr) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
    }
    break;
  default:
    ShouldNotReachHere();
    fn = nullptr;  // unreachable
  }
  __ mov(rscratch1, fn);
  __ blr(rscratch1);
}

address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() {
  assert(VM_Version::supports_float16(), "this intrinsic is not supported");
  // r19_sender_sp: sender sp
  // stack:
  //        [ arg ] <-- esp
  //        [ arg ]
  // retaddr in lr
  // result in v0

  address entry_point = __ pc();
  __ ldrw(c_rarg0, Address(esp));
  __ flt16_to_flt(v0, c_rarg0, v1);
  __ mov(sp, r19_sender_sp); // Restore caller's SP
  __ br(lr);
  return entry_point;
}

address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() {
  assert(VM_Version::supports_float16(), "this intrinsic is not supported");
  // r19_sender_sp: sender sp
  // stack:
  //        [ arg ] <-- esp
  //        [ arg ]
  // retaddr in lr
  // result in c_rarg0

  address entry_point = __ pc();
  __ ldrs(v0, Address(esp));
  __ flt_to_flt16(c_rarg0, v0, v1);
  __ mov(sp, r19_sender_sp); // Restore caller's SP
  __ br(lr);
  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  // rmethod: Method*
  // r19_sender_sp: sender SP

  address entry_point = __ pc();

  // abstract method entry

  //  pop return address, reset last_sp to null
  __ empty_expression_stack();
  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
                                     rmethod);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ldr(rscratch1, Address(rfp,
                       frame::interpreter_frame_monitor_block_top_offset *
                       wordSize));
    __ lea(rscratch1, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
    __ mov(rscratch2, sp);
    __ cmp(rscratch1, rscratch2); // maximal sp for current rfp (stack
                                  // grows downward)
    __ br(Assembler::HS, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters

  // ??? convention: expect aberrant index in register r1
  __ movw(c_rarg2, r1);
  // ??? convention: expect array in register r3
  __ mov(c_rarg1, r3);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == nullptr, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(r0, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::
                                    create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // ExternalAddress can't take a null pointer, because
    // external_word_Relocation will assert; load NULL_WORD directly instead.
    if (message != nullptr) {
      __ lea(c_rarg2, Address((address)message));
    } else {
      __ mov(c_rarg2, NULL_WORD);
    }
    __ call_VM(r0,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ b(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(esp, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
  // and null it as marker that esp is now tos until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  if (state == atos) {
    Register obj = r0;
    Register mdp = r1;
    Register tmp = r2;
    __ profile_return_type(mdp, obj, tmp);
  }

  const Register cache = r1;
  const Register index = r2;

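  // invokedynamic call sites use a 4-byte index into the resolved indy
  // entries; all other invokes use a 2-byte index into the resolved method
  // entries.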
  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
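    // Pop the parameters: each occupies one stack slot, so scale the count by wordSize (LSL 3)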
    __ add(esp, esp, cache, Assembler::LSL, 3);
  } else {
    // Pop N words from the stack
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
    __ add(esp, esp, cache, Assembler::LSL, 3);
  }

  // Restore machine SP
  __ restore_sp_after_call();

  __ check_and_handle_popframe(rthread);
  __ check_and_handle_earlyret(rthread);

  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step,
                                                               address continuation) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);
  __ get_dispatch();

  __ restore_sp_after_call();  // Restore SP to extended SP

  // Restore expression stack pointer
  __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(esp, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
  // null last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (EnableJVMCI && state == vtos && step == 0) {
    Label L;
    __ ldrb(rscratch1, Address(rthread, JavaThread::pending_monitorenter_offset()));
    __ cbz(rscratch1, L);
    // Clear flag.
    __ strb(zr, Address(rthread, JavaThread::pending_monitorenter_offset()));
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ ldrb(rscratch1, Address(rthread, JavaThread::pending_monitorenter_offset()));
      __ cbz(rscratch1, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(r0);         break;
  case T_CHAR   : __ uxth(r0, r0);       break;
  case T_BYTE   : __ sxtb(r0, r0);       break;
  case T_SHORT  : __ sxth(r0, r0);       break;
  case T_INT    : __ uxtw(r0, r0);       break;  // FIXME: We almost certainly don't need this
  case T_LONG   : /* nothing to do */    break;
  case T_VOID   : /* nothing to do */    break;
  case T_FLOAT  : /* nothing to do */    break;
  case T_DOUBLE : /* nothing to do */    break;
  case T_OBJECT :
    // retrieve result from frame
    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(r0);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(lr);                                  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ push_cont_fastpath(rthread);
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath(rthread);
  __ membar(Assembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
  if (!Continuations::enabled()) return nullptr;
  address start = __ pc();

  __ restore_bcp();
  __ restore_locals();

  // Restore constant pool cache
  __ ldr(rcpool, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));

  // Restore Java expression stack pointer
  __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(esp, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
  // and null it as marker that esp is now tos until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  // Restore machine SP
  __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ lea(sp, Address(rfp, rscratch1, Address::lsl(LogBytesPerWord)));

  // Restore method
  __ ldr(rmethod, Address(rfp, frame::interpreter_frame_method_offset * wordSize));

  // Restore dispatch
  uint64_t offset;
  __ adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
  __ add(rdispatch, rdispatch, offset);

  __ ret(lr);

  return start;
}


// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either the counters in the MethodCounters* or in the MDO, depending on whether we're profiling.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ ldr(r0, Address(rmethod, Method::method_data_offset()));
    __ cbz(r0, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                              in_bytes(InvocationCounter::counter_offset()));
    const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
    __ b(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(rscratch2,
                MethodCounters::invocation_counter_offset() +
                InvocationCounter::counter_offset());
  __ get_method_counters(rmethod, rscratch2, done);
  const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
  __ bind(done);
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry.
  // On return (i.e. the jump back to the invocation of the interpreter)
  // everything is as it was on entry.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (null bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or null if the
  // compilation did not complete (either went background or bailed
  // out).
  __ mov(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ b(do_continue);
}

// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was not
// obvious in generate_method_entry), the guard should work for them too.
//
// Args:
//      r3: number of additional locals this frame needs (what we must check)
//      rmethod: Method*
//
// Kills:
//      r0
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_aarch64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  // total overhead size: entry_size + (saved rfp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const size_t page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  //
  // Note that we use SUBS rather than CMP here because the immediate
  // field of this instruction may overflow.  SUBS can cope with this
  // because it is a macro that will expand to some number of MOV
  // instructions and a register operation.
  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ br(Assembler::LS, after_frame_check);

  // compute sp as if this were going to be the last frame on
  // the stack before the red zone

  // locals + overhead, in bytes
  __ mov(r0, overhead_size);
  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.

  const Address stack_limit(rthread, JavaThread::stack_overflow_limit_offset());
  __ ldr(rscratch1, stack_limit);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack limit is non-zero.
  __ cbnz(rscratch1, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add stack limit to locals.
  __ add(r0, r0, rscratch1);

  // Check against the current stack bottom.
  __ cmp(sp, r0);
  __ br(Assembler::HI, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller.  This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind.  The ANDR should be
  // unnecessary because the sender SP in r19 is always aligned, but
  // it doesn't hurt.
  __ andr(sp, r19_sender_sp, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
  __ far_jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rmethod: Method*
//      rlocals: locals
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address monitor_block_top(
        rfp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

#ifdef ASSERT
  {
    Label L;
    __ ldrh(r0, access_flags);
    __ tst(r0, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::NE, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ ldrh(r0, access_flags);
    __ tst(r0, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ br(Assembler::EQ, done);
    __ load_mirror(r0, rmethod, r5, rscratch2);

#ifdef ASSERT
    {
      Label L;
      __ cbnz(r0, L);
      __ stop("synchronization object is null");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ check_extended_sp();
  __ sub(sp, sp, entry_size); // add space for a monitor entry
  __ sub(esp, esp, entry_size);
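  // Record the new extended SP and the monitor block top as frame-relative
  // offsets (in stack elements), as the frame-walking code expects.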
  __ sub(rscratch1, sp, rfp);
  __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
  __ str(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ sub(rscratch1, esp, rfp);
  __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
  __ str(rscratch1, monitor_block_top);  // set new monitor block top

  // store object
  __ str(r0, Address(esp, BasicObjectLock::obj_offset()));
  __ mov(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      lr: return address
//      rmethod: Method*
//      rlocals: pointer to locals
//      rcpool: cp cache
//      stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ sub(esp, sp, 14 * wordSize);
    __ mov(rbcp, zr);
    __ mov(rscratch1, frame::interpreter_frame_initial_sp_offset);
    __ stp(rscratch1, zr, Address(__ pre(sp, -14 * wordSize)));
    // add 2 zero-initialized slots for native calls
    __ stp(zr, zr, Address(sp, 12 * wordSize));
  } else {
    __ sub(esp, sp, 12 * wordSize);
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));    // get ConstMethod
    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ mov(rscratch1, frame::interpreter_frame_initial_sp_offset);
    __ stp(rscratch1, rbcp, Address(__ pre(sp, -12 * wordSize)));
  }

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
    __ cbz(rscratch1, method_data_continue);
    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
    __ stp(rscratch1, rmethod, Address(sp, 6 * wordSize));  // save Method* and mdp (method data pointer)
  } else {
    __ stp(zr, rmethod, Address(sp, 6 * wordSize));         // save Method* (no mdp)
  }

  __ protect_return_address();
  __ stp(rfp, lr, Address(sp, 10 * wordSize));
  __ lea(rfp, Address(sp, 10 * wordSize));

  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset()));
  __ sub(rscratch1, rlocals, rfp);
  __ lsr(rscratch1, rscratch1, Interpreter::logStackElementSize);   // rscratch1 = (rlocals - rfp) in stack elements
  // Store relativized rlocals, see frame::interpreter_frame_locals().
  __ stp(rscratch1, rcpool, Address(sp, 2 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ stp(zr, r19_sender_sp, Address(sp, 8 * wordSize));

  // Get mirror
  __ load_mirror(r10, rmethod, r5, rscratch2);
  if (!native_call) {
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
    __ add(rscratch1, rscratch1, MAX2(3, Method::extra_stack_entries()));
    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
    __ andr(rscratch1, rscratch1, -16);
    __ sub(rscratch2, rscratch1, rfp);
    __ asr(rscratch2, rscratch2, Interpreter::logStackElementSize);
    // Store extended SP and mirror
    __ stp(r10, rscratch2, Address(sp, 4 * wordSize));
    // Move SP out of the way
    __ mov(sp, rscratch1);
  } else {
    // Make sure there is room for the exception oop pushed in case method throws
    // an exception (see TemplateInterpreterGenerator::generate_throw_exception())
    __ sub(rscratch1, sp, 2 * wordSize);
    __ sub(rscratch2, rscratch1, rfp);
    __ asr(rscratch2, rscratch2, Interpreter::logStackElementSize);
    __ stp(r10, rscratch2, Address(sp, 4 * wordSize));
    __ mov(sp, rscratch1);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rmethod: Method*
  // r19_sender_sp: senderSP must be preserved for the slow path; on the fast path SP is set to it

  // LR is live.  It must be saved around calls.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  Label slow_path;
  const Register local_0 = c_rarg0;
  // Check if local 0 != null
  // If the receiver is null then it is OK to jump to the slow path.
  __ ldr(local_0, Address(esp, 0));
  __ cbz(local_0, slow_path);

  // Load the value of the referent field.
  const Address field_address(local_0, referent_offset);
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->load_at(_masm, IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ rscratch1, /*tmp2*/ rscratch2);

  // areturn
  __ andr(sp, r19_sender_sp, -16);  // done with stack
  __ ret(lr);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  assert(UseCRC32Intrinsics, "this intrinsic is not supported");
  address entry = __ pc();

  // rmethod: Method*
  // r19_sender_sp: senderSP must be preserved for slow path
  // esp: args

  Label slow_path;
  // If we need a safepoint check, generate full interpreter entry.
  __ safepoint_poll(slow_path, false /* at_return */, false /* acquire */, false /* in_nmethod */);

  // We don't generate local frame and don't align stack because
  // we call stub code and there is no safepoint on this path.

  // Load parameters
  const Register crc = c_rarg0;  // crc
  const Register val = c_rarg1;  // source java byte value
  const Register tbl = c_rarg2;  // scratch

  // Arguments are reversed on java expression stack
  __ ldrw(val, Address(esp, 0));              // byte value
  __ ldrw(crc, Address(esp, wordSize));       // Initial CRC

  uint64_t offset;
  __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
  __ add(tbl, tbl, offset);

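  // The CRC kernel operates on the bitwise-inverted value, so invert the
  // incoming CRC before updating and invert again before returning.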
  __ mvnw(crc, crc); // ~crc
  __ update_byte_crc32(crc, val, tbl);
  __ mvnw(crc, crc); // ~crc

  // result in c_rarg0

  __ andr(sp, r19_sender_sp, -16);
  __ ret(lr);

  // generate a vanilla native entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  assert(UseCRC32Intrinsics, "this intrinsic is not supported");
  address entry = __ pc();

  // rmethod: Method*
  // r19_sender_sp: senderSP must be preserved for slow path

  Label slow_path;
  // If we need a safepoint check, generate full interpreter entry.
  __ safepoint_poll(slow_path, false /* at_return */, false /* acquire */, false /* in_nmethod */);

  // We don't generate local frame and don't align stack because
  // we call stub code and there is no safepoint on this path.

  // Load parameters
  const Register crc = c_rarg0;  // crc
  const Register buf = c_rarg1;  // source java byte array address
  const Register len = c_rarg2;  // length
  const Register off = len;      // offset (aliases 'len'; their live ranges never overlap)

  // Arguments are reversed on java expression stack
  // Calculate address of start element
  if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
    __ ldr(buf, Address(esp, 2*wordSize)); // long buf
    __ ldrw(off, Address(esp, wordSize)); // offset
    __ add(buf, buf, off); // + offset
    __ ldrw(crc,   Address(esp, 4*wordSize)); // Initial CRC
  } else {
    __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
    __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
    __ ldrw(off, Address(esp, wordSize)); // offset
    __ add(buf, buf, off); // + offset
    __ ldrw(crc,   Address(esp, 3*wordSize)); // Initial CRC
  }
  // Can now load 'len' since we're finished with 'off'
  __ ldrw(len, Address(esp, 0)); // Length

  __ andr(sp, r19_sender_sp, -16); // Restore the caller's SP

  // We are frameless so we can just jump to the stub.
  __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));

  // generate a vanilla native entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
  return entry;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native.
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  assert(UseCRC32CIntrinsics, "this intrinsic is not supported");
  address entry = __ pc();

  // Prepare jump to stub using parameters from the stack
  const Register crc = c_rarg0; // initial crc
  const Register buf = c_rarg1; // source java byte array address
  const Register len = c_rarg2; // len argument to the kernel

  const Register end = len; // index of last element to process
  const Register off = crc; // offset (aliases 'crc'; 'off' is dead before 'crc' is loaded)

  __ ldrw(end, Address(esp)); // int end
  __ ldrw(off, Address(esp, wordSize)); // int offset
  __ sub(len, end, off);
  __ ldr(buf, Address(esp, 2*wordSize)); // byte[] buf | long buf
  __ add(buf, buf, off); // + offset
  if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
    __ ldrw(crc, Address(esp, 4*wordSize)); // int crc
  } else {
    __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
    __ ldrw(crc, Address(esp, 3*wordSize)); // int crc
  }

  __ andr(sp, r19_sender_sp, -16); // Restore the caller's SP

  // Jump to the stub.
  __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()));

  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // See more discussion in stackOverflow.hpp.

  const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
  const int page_size = (int)os::vm_page_size();
  const int n_shadow_pages = shadow_zone_size / page_size;

#ifdef ASSERT
  Label L_good_limit;
  __ ldr(rscratch1, Address(rthread, JavaThread::shadow_zone_safe_limit()));
  __ cbnz(rscratch1, L_good_limit);
  __ stop("shadow zone safe limit is not initialized");
  __ bind(L_good_limit);

  Label L_good_watermark;
  __ ldr(rscratch1, Address(rthread, JavaThread::shadow_zone_growth_watermark()));
  __ cbnz(rscratch1, L_good_watermark);
  __ stop("shadow zone growth watermark is not initialized");
  __ bind(L_good_watermark);
#endif

  Label L_done;
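  // The growth watermark records the lowest SP for which the shadow zone has
  // already been banged; skip the banging when SP is still above it.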

  __ ldr(rscratch1, Address(rthread, JavaThread::shadow_zone_growth_watermark()));
  __ cmp(sp, rscratch1);
  __ br(Assembler::HI, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ sub(rscratch2, sp, p * page_size);
    __ str(zr, Address(rscratch2));
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ ldr(rscratch1, Address(rthread, JavaThread::shadow_zone_safe_limit()));
  __ cmp(sp, rscratch1);
  __ br(Assembler::LS, L_done);
  __ mov(rscratch1, sp);
  __ str(rscratch1, Address(rthread, JavaThread::shadow_zone_growth_watermark()));

  __ bind(L_done);
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // rmethod: Method*
  // r19_sender_sp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rmethod, Method::const_offset());
  const Address access_flags      (rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r2, ConstMethod::
                                       size_of_parameters_offset());

  // get parameter size (always needed)
  __ ldr(r2, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack.

  // rmethod: Method*
  // r2: size of parameters
  // r19_sender_sp: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ add(rlocals, rlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andr(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldrh(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::NE, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrh(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ lea(rscratch1, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter 1");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  if (runtime_upcalls) {
    __ generate_runtime_upcalls_on_method_entry();
  }

  // work registers
  const Register t = r17;
  const Register result_handler = r19;

  // allocate space for parameters
  __ ldr(t, Address(rmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
  __ andr(sp, rscratch1, -16);
  __ mov(esp, rscratch1);

  // get signature handler
  {
    Label L;
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ cbnz(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch rmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ blr(t);
  __ get_method(rmethod);        // slow path can do a GC, reload rmethod

1351   // result handler is in r0
1352   // set result handler
1353   __ mov(result_handler, r0);
1354   // Save it in the frame in case of preemption; we cannot rely on callee saved registers.
1355   __ str(r0, Address(rfp, frame::interpreter_frame_result_handler_offset * wordSize));
1356 
1357   // pass mirror handle if static call
1358   {
1359     Label L;
1360     __ ldrh(t, Address(rmethod, Method::access_flags_offset()));
1361     __ tbz(t, exact_log2(JVM_ACC_STATIC), L);
1362     // get mirror
1363     __ load_mirror(t, rmethod, r10, rscratch2);
1364     // copy mirror into activation frame
1365     __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
1366     // pass handle to mirror
1367     __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
1368     __ bind(L);
1369   }
1370 
1371   // get native function entry point in r10
1372   {
1373     Label L;
1374     __ ldr(r10, Address(rmethod, Method::native_function_offset()));
1375     ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
1376     __ lea(rscratch2, unsatisfied);
1377     __ ldr(rscratch2, rscratch2);
1378     __ cmp(r10, rscratch2);
1379     __ br(Assembler::NE, L);
1380     __ call_VM(noreg,
1381                CAST_FROM_FN_PTR(address,
1382                                 InterpreterRuntime::prepare_native_call),
1383                rmethod);
1384     __ get_method(rmethod);
1385     __ ldr(r10, Address(rmethod, Method::native_function_offset()));
1386     __ bind(L);
1387   }
1388 
1389   // pass JNIEnv
1390   __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
1391 
1392   // It is enough that the pc() points into the right code
1393   // segment. It does not have to be the correct return pc.
1394   // For convenience we use the pc we want to resume to in
1395   // case of preemption on Object.wait.
1396   Label native_return;
1397   __ set_last_Java_frame(esp, rfp, native_return, rscratch1);
1398 
1399   // change thread state
1400 #ifdef ASSERT
1401   {
1402     Label L;
1403     __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
1404     __ cmp(t, (u1)_thread_in_Java);
1405     __ br(Assembler::EQ, L);
1406     __ stop("Wrong thread state in native stub");
1407     __ bind(L);
1408   }
1409 #endif
1410 
1411   // Change state to native
1412   __ mov(rscratch1, _thread_in_native);
1413   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1414   __ stlrw(rscratch1, rscratch2);
1415 
1416   __ push_cont_fastpath();
1417 
1418   // Call the native method.
1419   __ blr(r10);
1420 
1421   __ pop_cont_fastpath();
1422 
1423   __ get_method(rmethod);
1424   // result potentially in r0 or v0
1425 
1426   // Restore cpu control state after JNI call
1427   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
1428 
1429   // make room for the pushes we're about to do
1430   __ sub(rscratch1, esp, 4 * wordSize);
1431   __ andr(sp, rscratch1, -16);
1432 
1433   // NOTE: The order of these pushes is known to frame::interpreter_frame_result
1434   // in order to extract the result of a method call. If the order of these
1435   // pushes change or anything else is added to the stack then the code in
1436   // interpreter_frame_result must also change.
1437   __ push(dtos);
1438   __ push(ltos);
1439 
1440   __ verify_sve_vector_length();
1441 
1442   // change thread state
1443   __ mov(rscratch1, _thread_in_native_trans);
1444   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1445   __ stlrw(rscratch1, rscratch2);
1446 
1447   // Force this write out before the read below
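  // Standard thread-state transition protocol: the state store must be
  // ordered before the safepoint poll, either by a local full barrier
  // here or by the system-wide barrier used with UseSystemMemoryBarrier.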
1448   if (!UseSystemMemoryBarrier) {
1449     __ dmb(Assembler::ISH);
1450   }
1451 
1452   // check for safepoint operation in progress and/or pending suspend requests
1453   {
1454     Label L, Continue;
1455 
1456     // No need for acquire as Java threads always disarm themselves.
1457     __ safepoint_poll(L, true /* at_return */, false /* acquire */, false /* in_nmethod */);
1458     __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
1459     __ cbz(rscratch2, Continue);
1460     __ bind(L);
1461 
    // Don't use call_VM, as it will see a possible pending exception,
    // forward it, and never return here, preventing us from clearing
    // _last_native_pc down below. So we do the runtime call by hand.
1467     __ mov(c_rarg0, rthread);
1468     __ lea(rscratch2, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
1469     __ blr(rscratch2);
1470     __ get_method(rmethod);
1471     __ reinit_heapbase();
1472     __ bind(Continue);
1473   }
1474 
1475   // change thread state
1476   __ mov(rscratch1, _thread_in_Java);
1477   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1478   __ stlrw(rscratch1, rscratch2);
1479 
1480   if (LockingMode != LM_LEGACY) {
1481     // Check preemption for Object.wait()
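    // If the VM preempted this (virtual) thread during the native call,
    // it left an alternate return pc in the thread. Jump there instead of
    // falling through; when the thread is resumed it continues at
    // native_return below, where the frame state is restored.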
1482     Label not_preempted;
1483     __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
1484     __ cbz(rscratch1, not_preempted);
1485     __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
1486     __ br(rscratch1);
1487     __ bind(native_return);
1488     __ restore_after_resume(true /* is_native */);
1489     // reload result_handler
1490     __ ldr(result_handler, Address(rfp, frame::interpreter_frame_result_handler_offset*wordSize));
1491     __ bind(not_preempted);
1492   } else {
    // any pc will do, so just use this one for LM_LEGACY to keep the code together.
1494     __ bind(native_return);
1495   }
1496 
1497   // reset_last_Java_frame
1498   __ reset_last_Java_frame(true);
1499 
1500   if (CheckJNICalls) {
1501     // clear_pending_jni_exception_check
1502     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
1503   }
1504 
1505   // reset handle block
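  // (this frees, in bulk, all JNI local references created during the
  // native call by resetting the top of the thread's JNIHandleBlock)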
1506   __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
1507   __ str(zr, Address(t, JNIHandleBlock::top_offset()));
1508 
  // If the result is an oop, unbox it and store it in the frame where the
  // GC will see it and the result handler will pick it up
1511 
1512   {
1513     Label no_oop;
1514     __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
1515     __ cmp(t, result_handler);
1516     __ br(Assembler::NE, no_oop);
1517     // Unbox oop result, e.g. JNIHandles::resolve result.
1518     __ pop(ltos);
1519     __ resolve_jobject(r0, t, rscratch2);
1520     __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // keep the stack depth as expected by pushing the oop, which will eventually be discarded
1522     __ push(ltos);
1523     __ bind(no_oop);
1524   }
1525 
1526   {
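    // Re-enable ("reguard") the yellow guard pages if a stack overflow
    // in native code disabled them, so they are armed again when we
    // return to Java.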
1527     Label no_reguard;
1528     __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
1529     __ ldrw(rscratch1, Address(rscratch1));
1530     __ cmp(rscratch1, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
1531     __ br(Assembler::NE, no_reguard);
1532 
1533     __ push_call_clobbered_registers();
1534     __ mov(c_rarg0, rthread);
1535     __ lea(rscratch2, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1536     __ blr(rscratch2);
1537     __ pop_call_clobbered_registers();
1538 
1539     __ bind(no_reguard);
1540   }
1541 
  // The method register is junk from after the thread_in_native transition
  // until here.  Also, we can't call_VM until the bcp has been restored;
  // we need the bcp for throwing the exception below, so reload the method
  // (and from it the bcp) now.
1545   __ get_method(rmethod);
1546 
1547   // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
1548   // rbcp == code_base()
1549   __ ldr(rbcp, Address(rmethod, Method::const_offset()));   // get ConstMethod*
1550   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
1551   // handle exceptions (exception handling will handle unlocking!)
1552   {
1553     Label L;
1554     __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
1555     __ cbz(rscratch1, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the sp is not correctly set at this point.
1560     __ MacroAssembler::call_VM(noreg,
1561                                CAST_FROM_FN_PTR(address,
1562                                InterpreterRuntime::throw_pending_exception));
1563     __ should_not_reach_here();
1564     __ bind(L);
1565   }
1566 
1567   // do unlocking if necessary
1568   {
1569     Label L;
1570     __ ldrh(t, Address(rmethod, Method::access_flags_offset()));
1571     __ tbz(t, exact_log2(JVM_ACC_SYNCHRONIZED), L);
    // the code below should be shared with the interpreter macro
    // assembler implementation
1574     {
1575       Label unlock;
      // The BasicObjectLock will be first in the list, since this is a
      // synchronized method. However, we need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // the monitor is expected in c_rarg1 for the slow unlock path
1581       __ lea (c_rarg1, Address(rfp,   // address of first monitor
1582                                (intptr_t)(frame::interpreter_frame_initial_sp_offset *
1583                                           wordSize - sizeof(BasicObjectLock))));
1584 
1585       __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset()));
1586       __ cbnz(t, unlock);
1587 
1588       // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                   InterpreterRuntime::throw_illegal_monitor_state_exception));
1592       __ should_not_reach_here();
1593 
1594       __ bind(unlock);
1595       __ unlock_object(c_rarg1);
1596     }
1597     __ bind(L);
1598   }
1599 
1600   // jvmti support
1601   // Note: This must happen _after_ handling/throwing any exceptions since
1602   //       the exception handler code notifies the runtime of method exits
1603   //       too. If this happens before, method entry/exit notifications are
1604   //       not properly paired (was bug - gri 11/22/99).
1605   __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1606 
  // restore the potential result in r0:d0 and call the result handler to
  // convert and handle it
1609 
1610   __ pop(ltos);
1611   __ pop(dtos);
1612 
1613   __ blr(result_handler);
1614 
1615   // remove activation
1616   __ ldr(esp, Address(rfp,
1617                     frame::interpreter_frame_sender_sp_offset *
1618                     wordSize)); // get sender sp
1619   // remove frame anchor
1620   __ leave();
1621 
1622   // restore sender sp
1623   __ mov(sp, esp);
1624 
1625   __ ret(lr);
1626 
1627   if (inc_counter) {
1628     // Handle overflow of counter and compile method
1629     __ bind(invocation_counter_overflow);
1630     generate_counter_overflow(continue_after_compile);
1631   }
1632 
1633   return entry_point;
1634 }
1635 
1636 //
1637 // Generic interpreted method entry to (asm) interpreter
1638 //
1639 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
1640   // determine code generation flags
1641   bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;
1642 
1643   // rscratch1: sender sp
1644   address entry_point = __ pc();
1645 
1646   const Address constMethod(rmethod, Method::const_offset());
1647   const Address access_flags(rmethod, Method::access_flags_offset());
1648   const Address size_of_parameters(r3,
1649                                    ConstMethod::size_of_parameters_offset());
1650   const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());
1651 
1652   // get parameter size (always needed)
1653   // need to load the const method first
1654   __ ldr(r3, constMethod);
1655   __ load_unsigned_short(r2, size_of_parameters);
1656 
1657   // r2: size of parameters
1658 
1659   __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
1660   __ sub(r3, r3, r2); // r3 = no. of additional locals
1661 
1662   // see if we've got enough room on the stack for locals plus overhead.
1663   generate_stack_overflow_check();
1664 
1665   // compute beginning of parameters (rlocals)
1666   __ add(rlocals, esp, r2, ext::uxtx, 3);
1667   __ sub(rlocals, rlocals, wordSize);
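  // rlocals now points at parameter slot 0, the highest-addressed local;
  // the remaining locals live at successively lower addresses.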
1668 
1669   __ mov(rscratch1, esp);
1670 
1671   // r3 - # of additional locals
1672   // allocate space for locals
1673   // explicitly initialize locals
  // Initialize the memory allocated for locals in the same direction as
  // the stack grows, to match the page initialization order required by
  // the windows-aarch64 stack page growth rules (see
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-160#stack)
1678   {
1679     Label exit, loop;
1680     __ ands(zr, r3, r3);
1681     __ br(Assembler::LE, exit); // do nothing if r3 <= 0
1682     __ bind(loop);
1683     __ str(zr, Address(__ pre(rscratch1, -wordSize)));
1684     __ sub(r3, r3, 1); // until everything initialized
1685     __ cbnz(r3, loop);
1686     __ bind(exit);
1687   }
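  // rscratch1 now points at the lowest zeroed local slot and, once
  // 16-byte aligned, becomes the new sp below.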
1688 
1689   // Padding between locals and fixed part of activation frame to ensure
1690   // SP is always 16-byte aligned.
1691   __ andr(sp, rscratch1, -16);
1692 
  // And load the base dispatch table into rdispatch
1694   __ get_dispatch();
1695 
1696   // initialize fixed part of activation frame
1697   generate_fixed_frame(false);
1698 
1699   // make sure method is not native & not abstract
1700 #ifdef ASSERT
1701   __ ldrh(r0, access_flags);
1702   {
1703     Label L;
1704     __ tst(r0, JVM_ACC_NATIVE);
1705     __ br(Assembler::EQ, L);
1706     __ stop("tried to execute native method as non-native");
1707     __ bind(L);
1708   }
  {
1710     Label L;
1711     __ tst(r0, JVM_ACC_ABSTRACT);
1712     __ br(Assembler::EQ, L);
1713     __ stop("tried to execute abstract method in interpreter");
1714     __ bind(L);
1715   }
1716 #endif
1717 
  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of a synchronized method
  // which has not been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1726   __ mov(rscratch2, true);
1727   __ strb(rscratch2, do_not_unlock_if_synchronized);
1728 
1729   Register mdp = r3;
1730   __ profile_parameters_type(mdp, r1, r2);
1731 
1732   // increment invocation count & check for overflow
1733   Label invocation_counter_overflow;
1734   if (inc_counter) {
1735     generate_counter_incr(&invocation_counter_overflow);
1736   }
1737 
1738   Label continue_after_compile;
1739   __ bind(continue_after_compile);
1740 
1741   bang_stack_shadow_pages(false);
1742 
1743   // reset the _do_not_unlock_if_synchronized flag
1744   __ strb(zr, do_not_unlock_if_synchronized);
1745 
1746   // check for synchronized methods
  // Must happen AFTER the invocation counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
1749   if (synchronized) {
1750     // Allocate monitor and lock method
1751     lock_method();
1752   } else {
1753     // no synchronization necessary
1754 #ifdef ASSERT
1755     {
1756       Label L;
1757       __ ldrh(r0, access_flags);
1758       __ tst(r0, JVM_ACC_SYNCHRONIZED);
1759       __ br(Assembler::EQ, L);
1760       __ stop("method needs synchronization");
1761       __ bind(L);
1762     }
1763 #endif
1764   }
1765 
1766   // start execution
1767 #ifdef ASSERT
1768   {
1769     Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
1772     __ ldr(rscratch1, monitor_block_top);
1773     __ lea(rscratch1, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
1774     __ cmp(esp, rscratch1);
1775     __ br(Assembler::EQ, L);
1776     __ stop("broken stack frame setup in interpreter 2");
1777     __ bind(L);
1778   }
1779 #endif
1780 
1781   // jvmti support
1782   __ notify_method_entry();
1783 
1784   // runtime upcalls
1785   if (runtime_upcalls) {
1786     __ generate_runtime_upcalls_on_method_entry();
1787   }
1788 
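  // Start execution: load the first bytecode at rbcp and jump through the
  // dispatch table. Execution continues in the bytecode templates and
  // never returns here.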
1789   __ dispatch_next(vtos);
1790 
1791   // invocation counter overflow
1792   if (inc_counter) {
1793     // Handle overflow of counter and compile method
1794     __ bind(invocation_counter_overflow);
1795     generate_counter_overflow(continue_after_compile);
1796   }
1797 
1798   return entry_point;
1799 }
1800 
1801 // Method entry for java.lang.Thread.currentThread
1802 address TemplateInterpreterGenerator::generate_currentThread() {
1803   address entry_point = __ pc();
1804 
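  // Load the JavaThread's vthread OopHandle (the current carrier or
  // mounted virtual thread) and resolve it to a plain oop in r0.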
1805   __ ldr(r0, Address(rthread, JavaThread::vthread_offset()));
1806   __ resolve_oop_handle(r0, rscratch1, rscratch2);
1807   __ ret(lr);
1808 
1809   return entry_point;
1810 }
1811 
1812 // Not supported
1813 address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
1814 address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
1815 address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
1816 address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
1817 
1818 //-----------------------------------------------------------------------------
1819 // Exceptions
1820 
1821 void TemplateInterpreterGenerator::generate_throw_exception() {
1822   // Entry point in previous activation (i.e., if the caller was
1823   // interpreted)
1824   Interpreter::_rethrow_exception_entry = __ pc();
  // Clear interpreter_frame_last_sp; we are going to empty the
  // expression stack for the exception processing anyway.
1827   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1828   // r0: exception
1829   // r3: return address/pc that threw exception
1830   __ restore_bcp();    // rbcp points to call/send
1831   __ restore_locals();
1832   __ restore_constant_pool_cache();
1833   __ reinit_heapbase();  // restore rheapbase as heapbase.
1834   __ get_dispatch();
1835 
1836   // Entry point for exceptions thrown within interpreter code
1837   Interpreter::_throw_exception_entry = __ pc();
1838   // If we came here via a NullPointerException on the receiver of a
1839   // method, rmethod may be corrupt.
1840   __ get_method(rmethod);
1841   // expression stack is undefined here
1842   // r0: exception
1843   // rbcp: exception bcp
1844   __ verify_oop(r0);
1845   __ mov(c_rarg1, r0);
1846 
1847   // expression stack must be empty before entering the VM in case of
1848   // an exception
1849   __ empty_expression_stack();
1850   // find exception handler address and preserve exception oop
1851   __ call_VM(r3,
1852              CAST_FROM_FN_PTR(address,
1853                           InterpreterRuntime::exception_handler_for_exception),
1854              c_rarg1);
1855 
1856   // Restore machine SP
1857   __ restore_sp_after_call();
1858 
1859   // r0: exception handler entry point
1860   // r3: preserved exception oop
1861   // rbcp: bcp for exception handler
1862   __ push_ptr(r3); // push exception which is now the only value on the stack
1863   __ br(r0); // jump to exception handler (may be _remove_activation_entry!)
1864 
1865   // If the exception is not handled in the current frame the frame is
1866   // removed and the exception is rethrown (i.e. exception
1867   // continuation is _rethrow_exception).
1868   //
  // Note: At this point the bci is still the bci for the instruction
1870   // which caused the exception and the expression stack is
1871   // empty. Thus, for any VM calls at this point, GC will find a legal
1872   // oop map (with empty expression stack).
1873 
1874   //
1875   // JVMTI PopFrame support
1876   //
1877 
1878   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1879   __ empty_expression_stack();
1880   // Set the popframe_processing bit in pending_popframe_condition
1881   // indicating that we are currently handling popframe, so that
1882   // call_VMs that may happen later do not trigger new popframe
1883   // handling cycles.
1884   __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
1885   __ orr(r3, r3, JavaThread::popframe_processing_bit);
1886   __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
1887 
1888   {
1889     // Check to see whether we are returning to a deoptimized frame.
1890     // (The PopFrame call ensures that the caller of the popped frame is
1891     // either interpreted or compiled and deoptimizes it if compiled.)
1892     // In this case, we can't call dispatch_next() after the frame is
1893     // popped, but instead must save the incoming arguments and restore
1894     // them after deoptimization has occurred.
1895     //
1896     // Note that we don't compare the return PC against the
1897     // deoptimization blob's unpack entry because of the presence of
1898     // adapter frames in C2.
1899     Label caller_not_deoptimized;
1900     __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
1901     // This is a return address, so requires authenticating for PAC.
1902     __ authenticate_return_address(c_rarg1);
1903     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1904                                InterpreterRuntime::interpreter_contains), c_rarg1);
1905     __ cbnz(r0, caller_not_deoptimized);
1906 
1907     // Compute size of arguments for saving when returning to
1908     // deoptimized caller
1909     __ get_method(r0);
1910     __ ldr(r0, Address(r0, Method::const_offset()));
1911     __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
1912                                                     size_of_parameters_offset())));
1913     __ lsl(r0, r0, Interpreter::logStackElementSize);
1914     __ restore_locals(); // XXX do we need this?
1915     __ sub(rlocals, rlocals, r0);
1916     __ add(rlocals, rlocals, wordSize);
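    // rlocals now points at the lowest-addressed parameter slot, i.e. the
    // start of the argument area to preserve; r0 holds its size in bytes.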
1917     // Save these arguments
1918     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1919                                            Deoptimization::
1920                                            popframe_preserve_args),
1921                           rthread, r0, rlocals);
1922 
1923     __ remove_activation(vtos,
1924                          /* throw_monitor_exception */ false,
1925                          /* install_monitor_exception */ false,
1926                          /* notify_jvmdi */ false);
1927 
1928     // Inform deoptimization that it is responsible for restoring
1929     // these arguments
1930     __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
1931     __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
1932 
1933     // Continue in deoptimization handler
1934     __ ret(lr);
1935 
1936     __ bind(caller_not_deoptimized);
1937   }
1938 
1939   __ remove_activation(vtos,
1940                        /* throw_monitor_exception */ false,
1941                        /* install_monitor_exception */ false,
1942                        /* notify_jvmdi */ false);
1943 
1944   // Restore the last_sp and null it out
1945   __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1946   __ lea(esp, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
1947   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1948 
1949   __ restore_bcp();
1950   __ restore_locals();
1951   __ restore_constant_pool_cache();
1952   __ get_method(rmethod);
1953   __ get_dispatch();
1954 
1955   // The method data pointer was incremented already during
1956   // call profiling. We have to restore the mdp for the current bcp.
1957   if (ProfileInterpreter) {
1958     __ set_method_data_pointer_for_bcp();
1959   }
1960 
1961   // Clear the popframe condition flag
1962   __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
1963   assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
1964 
1965 #if INCLUDE_JVMTI
1966   {
1967     Label L_done;
1968 
1969     __ ldrb(rscratch1, Address(rbcp, 0));
1970     __ cmpw(rscratch1, Bytecodes::_invokestatic);
1971     __ br(Assembler::NE, L_done);
1972 
1973     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1974     // Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
1975 
1976     __ ldr(c_rarg0, Address(rlocals, 0));
1977     __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);
1978 
1979     __ cbz(r0, L_done);
1980 
1981     __ str(r0, Address(esp, 0));
1982     __ bind(L_done);
1983   }
1984 #endif // INCLUDE_JVMTI
1985 
1986   // Restore machine SP
1987   __ restore_sp_after_call();
1988 
1989   __ dispatch_next(vtos);
1990   // end of PopFrame support
1991 
1992   Interpreter::_remove_activation_entry = __ pc();
1993 
1994   // preserve exception over this code sequence
1995   __ pop_ptr(r0);
1996   __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
1997   // remove the activation (without doing throws on illegalMonitorExceptions)
1998   __ remove_activation(vtos, false, true, false);
1999   // restore exception
2000   __ get_vm_result(r0, rthread);
2001 
2002   // In between activations - previous activation type unknown yet
2003   // compute continuation point - the continuation point expects the
2004   // following registers set up:
2005   //
2006   // r0: exception
2007   // lr: return address/pc that threw exception
2008   // esp: expression stack of caller
2009   // rfp: fp of caller
2010   __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
2011   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
2012                           SharedRuntime::exception_handler_for_return_address),
2013                         rthread, lr);
2014   __ mov(r1, r0);                               // save exception handler
2015   __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
2016   // We might be returning to a deopt handler that expects r3 to
2017   // contain the exception pc
2018   __ mov(r3, lr);
2019   // Note that an "issuing PC" is actually the next PC after the call
2020   __ br(r1);                                    // jump to exception
2021                                                 // handler of caller
2022 }
2023 
2024 
2025 //
2026 // JVMTI ForceEarlyReturn support
2027 //
2028 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
2029   address entry = __ pc();
2030 
2031   __ restore_bcp();
2032   __ restore_locals();
2033   __ empty_expression_stack();
2034   __ load_earlyret_value(state);
2035 
2036   __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
2037   Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());
2038 
2039   // Clear the earlyret state
2040   assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
2041   __ str(zr, cond_addr);
2042 
2043   __ remove_activation(state,
2044                        false, /* throw_monitor_exception */
2045                        false, /* install_monitor_exception */
2046                        true); /* notify_jvmdi */
2047   __ ret(lr);
2048 
2049   return entry;
2050 } // end of ForceEarlyReturn support
2051 
2052 
2053 
2054 //-----------------------------------------------------------------------------
2055 // Helper for vtos entry point generation
2056 
2057 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
2058                                                          address& bep,
2059                                                          address& cep,
2060                                                          address& sep,
2061                                                          address& aep,
2062                                                          address& iep,
2063                                                          address& lep,
2064                                                          address& fep,
2065                                                          address& dep,
2066                                                          address& vep) {
2067   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
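  // Each non-vtos entry point spills the value cached in the tos
  // register(s) onto the expression stack and then joins the common vtos
  // entry, so the template body only ever runs in the vtos state.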
2068   Label L;
2069   aep = __ pc();     // atos entry point
2070       __ push_ptr();
2071       __ b(L);
2072   fep = __ pc();     // ftos entry point
2073       __ push_f();
2074       __ b(L);
2075   dep = __ pc();     // dtos entry point
2076       __ push_d();
2077       __ b(L);
2078   lep = __ pc();     // ltos entry point
2079       __ push_l();
2080       __ b(L);
2081   bep = cep = sep = iep = __ pc();     // [bcsi]tos entry point
2082       __ push_i();
2083   vep = __ pc();     // vtos entry point
2084   __ bind(L);
2085   generate_and_dispatch(t);
2086 }
2087 
2088 //-----------------------------------------------------------------------------
2089 
2090 void TemplateInterpreterGenerator::count_bytecode() {
2091   if (CountBytecodesPerThread) {
2092     Address bc_counter_addr(rthread, Thread::bc_counter_offset());
2093     __ ldr(r10, bc_counter_addr);
2094     __ add(r10, r10, 1);
2095     __ str(r10, bc_counter_addr);
2096   }
2097   if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) {
2098     __ mov(r10, (address) &BytecodeCounter::_counter_value);
2099     __ atomic_add(noreg, 1, r10);
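    // the global counter is shared by all threads, so it is bumped
    // atomically; the per-thread counter above needs no atomics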
2100   }
2101 }
2102 
2103 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
2104   __ mov(r10, (address) &BytecodeHistogram::_counters[t->bytecode()]);
2105   __ atomic_addw(noreg, 1, r10);
2106 }
2107 
2108 // Non-product code
2109 #ifndef PRODUCT
2110 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
2111   address entry = __ pc();
2112 
2113   __ protect_return_address();
2114   __ push(lr);
2115   __ push(state);
2116   __ push(RegSet::range(r0, r15), sp);
2117   __ mov(c_rarg2, r0);  // Pass itos
2118   __ call_VM(noreg,
2119              CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
2120              c_rarg1, c_rarg2, c_rarg3);
2121   __ pop(RegSet::range(r0, r15), sp);
2122   __ pop(state);
2123   __ pop(lr);
2124   __ authenticate_return_address();
2125   __ ret(lr);                                   // return from result handler
2126 
2127   return entry;
2128 }
2129 
2130 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
2131   // Calculate new index for counter:
2132   //   _index = (_index >> log2_number_of_codes) |
2133   //            (bytecode << log2_number_of_codes);
2134   Register index_addr = rscratch1;
2135   Register index = rscratch2;
2136   __ mov(index_addr, (address) &BytecodePairHistogram::_index);
2137   __ ldrw(index, index_addr);
2138   __ mov(r10,
2139          ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
2140   __ orrw(index, r10, index, Assembler::LSR,
2141           BytecodePairHistogram::log2_number_of_codes);
2142   __ strw(index, index_addr);
2143 
2144   // Bump bucket contents:
2145   //   _counters[_index] ++;
2146   Register counter_addr = rscratch1;
2147   __ mov(r10, (address) &BytecodePairHistogram::_counters);
2148   __ lea(counter_addr, Address(r10, index, Address::lsl(LogBytesPerInt)));
2149   __ atomic_addw(noreg, 1, counter_addr);
2150 }
2151 
2152 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
2156 
2157   assert(Interpreter::trace_code(t->tos_in()) != nullptr,
2158          "entry must have been generated");
2159   __ bl(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
2160   __ reinit_heapbase();
2161 }
2162 
2163 
2164 void TemplateInterpreterGenerator::stop_interpreter_at() {
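  // Compare the global bytecode counter against StopInterpreterAt and
  // trap (brk #0) on a match, so a debugger can stop the VM at a precise
  // bytecode count.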
2165   Label L;
2166   __ push(rscratch1);
2167   __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
2168   __ ldr(rscratch1, Address(rscratch1));
2169   __ mov(rscratch2, StopInterpreterAt);
2170   __ cmpw(rscratch1, rscratch2);
2171   __ br(Assembler::NE, L);
2172   __ brk(0);
2173   __ bind(L);
2174   __ pop(rscratch1);
2175 }
2176 
2177 #endif // !PRODUCT