/*
 * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with -XX:+PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 180 * 1024;
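// Note: PrintInterpreter is a diagnostic flag, so it typically needs to be
// unlocked first, e.g.:
//   java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version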

#define __ _masm->

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // callee-save register for saving LR, shared with generate_native_entry
  const Register Rsaved_ret_addr = Rtmp_save0;

  __ mov(Rsaved_ret_addr, LR);

  __ mov(R1, Rmethod);
  __ mov(R2, Rlocals);
  __ mov(R3, SP);

  // Safer to save R9 (when scratched) since callers may have been
  // written assuming R9 survives. This is suboptimal but
  // probably not important for this slow case call site.
  // Note for R9 saving: slow_signature_handler may copy register
  // arguments above the current SP (passed as R3). It is safe for
  // call_VM to use push and pop to protect additional values on the
  // stack if needed.
  __ call_VM(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), true /* save R9 if needed*/);
  __ add(SP, SP, wordSize);     // Skip R0
  __ pop(RegisterSet(R1, R3));  // Load arguments passed in registers
#ifdef __ABI_HARD__
  // A few alternatives to the always-load-FP-registers approach:
  // - parse the method signature to detect FP arguments;
  // - keep a counter/flag on the stack indicating the number of FP arguments in the method.
  // The latter was originally implemented and tested, but the conditional path could
  // eliminate any gain from avoiding the 8 double-word loads.
  __ fldmiad(SP, FloatRegisterSet(D0, 8), writeback);
#endif // __ABI_HARD__

  __ ret(Rsaved_ret_addr);

  return entry;
}
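
// Roughly, the epilogue above re-establishes the ARM calling convention from
// the argument buffer that slow_signature_handler filled in at SP:
//   [SP + 0]             slot for R0 (skipped here; R0 is set up separately
//                        by the native-entry code, e.g. with JNIEnv)
//   [SP + 4 .. SP + 12]  popped into R1..R3 (integer register arguments)
//   [SP + 16 ...]        on hard-float ABIs, loaded into D0..D7
// Any remaining arguments stay on the stack where the native code expects them.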

//
// Various method entries (that the C++ and asm interpreters agree upon)
//------------------------------------------------------------------------------------------------------------------------
//

// Abstract method entry
// Attempts to execute an abstract method; throws AbstractMethodError
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry_point = __ pc();

  __ empty_expression_stack();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  DEBUG_ONLY(STOP("generate_abstract_entry");) // Should not reach here
  return entry_point;
}

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  address entry_point = nullptr;
  Register continuation = LR;
  bool use_runtime_call = false;
  switch (kind) {
  case Interpreter::java_lang_math_abs:
    entry_point = __ pc();
#ifdef __SOFTFP__
    use_runtime_call = true;
    __ ldrd(R0, Address(SP));
#else // !__SOFTFP__
    __ ldr_double(D0, Address(SP));
    __ abs_double(D0, D0);
#endif // __SOFTFP__
    break;
  case Interpreter::java_lang_math_sqrt:
    entry_point = __ pc();
#ifdef __SOFTFP__
    use_runtime_call = true;
    __ ldrd(R0, Address(SP));
#else // !__SOFTFP__
    __ ldr_double(D0, Address(SP));
    __ sqrt_double(D0, D0);
#endif // __SOFTFP__
    break;
  case Interpreter::java_lang_math_sin:
  case Interpreter::java_lang_math_cos:
  case Interpreter::java_lang_math_tan:
  case Interpreter::java_lang_math_log:
  case Interpreter::java_lang_math_log10:
  case Interpreter::java_lang_math_exp:
    entry_point = __ pc();
    use_runtime_call = true;
#ifdef __SOFTFP__
    __ ldrd(R0, Address(SP));
#else // !__SOFTFP__
    __ ldr_double(D0, Address(SP));
#endif // __SOFTFP__
    break;
  case Interpreter::java_lang_math_pow:
    entry_point = __ pc();
    use_runtime_call = true;
#ifdef __SOFTFP__
    __ ldrd(R0, Address(SP, 2 * Interpreter::stackElementSize));
    __ ldrd(R2, Address(SP));
#else // !__SOFTFP__
    __ ldr_double(D0, Address(SP, 2 * Interpreter::stackElementSize));
    __ ldr_double(D1, Address(SP));
#endif // __SOFTFP__
    break;
  case Interpreter::java_lang_math_fmaD:
  case Interpreter::java_lang_math_fmaF:
  case Interpreter::java_lang_math_tanh:
    // TODO: Implement intrinsic
    break;
  default:
    ShouldNotReachHere();
  }

  if (entry_point != nullptr) {
    __ mov(SP, Rsender_sp);
    if (use_runtime_call) {
      __ mov(Rtmp_save0, LR);
      continuation = Rtmp_save0;
      generate_math_runtime_call(kind);
    }
    __ ret(continuation);
  }
  return entry_point;
}
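
// For reference, on a VFP (hard-float) build the fast path generated above for
// java.lang.Math.sqrt is roughly:
//
//   ldr_double  D0, [SP]        ; load the double argument from the operand stack
//   sqrt_double D0, D0          ; compute the result in D0
//   mov         SP, Rsender_sp  ; pop the interpreter arguments
//   ret         LR              ; return to the caller with the result in D0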

void TemplateInterpreterGenerator::generate_math_runtime_call(AbstractInterpreter::MethodKind kind) {
  address fn;
  switch (kind) {
#ifdef __SOFTFP__
  case Interpreter::java_lang_math_abs:
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dabs);
    break;
  case Interpreter::java_lang_math_sqrt:
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
    break;
#endif // __SOFTFP__
  case Interpreter::java_lang_math_sin:
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
    break;
  case Interpreter::java_lang_math_cos:
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
    break;
  case Interpreter::java_lang_math_tan:
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
    break;
  case Interpreter::java_lang_math_log:
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
    break;
  case Interpreter::java_lang_math_log10:
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
    break;
  case Interpreter::java_lang_math_exp:
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
    break;
  case Interpreter::java_lang_math_pow:
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
    break;
  default:
    ShouldNotReachHere();
    fn = nullptr; // silence "maybe uninitialized" compiler warnings
  }
  __ call_VM_leaf(fn);
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
  //
#ifdef ASSERT
  { Label L;
    __ sub(Rtemp, FP, - frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ cmp(SP, Rtemp);  // Rtemp = maximal SP for current FP,
                        //  (stack grows negative)
    __ b(L, ls); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT

  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();

  // index is in R4_ArrayIndexOutOfBounds_index

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  // Array expected in R1.
  __ mov(R2, R4_ArrayIndexOutOfBounds_index);

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2);

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is in R2_ClassCastException_obj

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  __ mov(R1, R2_ClassCastException_obj);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             R1);

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == nullptr, "either oop or message but not both");
  address entry = __ pc();

  InlinedString Lname(name);
  InlinedString Lmessage(message);

  if (pass_oop) {
    // object is at TOS
    __ pop_ptr(R2);
  }

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  __ ldr_literal(R1, Lname);

  if (pass_oop) {
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2);
  } else {
    if (message != nullptr) {
      __ ldr_literal(R2, Lmessage);
    } else {
      __ mov(R2, 0);
    }
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), R1, R2);
  }

  // throw exception
  __ b(Interpreter::throw_exception_entry());

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ bind_literal(Lname);
  if (!pass_oop && (message != nullptr)) {
    __ bind_literal(Lmessage);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

  // Restore the stack bottom in case the i2c adapter adjusted the stack
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // and null it as a marker that SP holds tos until the next Java call
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  const Register Rcache = R2_tmp;
  const Register Rindex = R3_tmp;

  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(Rcache, Rindex);
    __ ldrh(Rcache, Address(Rcache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
  } else {
    // Pop N words from the stack
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(Rcache, Rindex);
    __ ldrh(Rcache, Address(Rcache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
  }

  __ check_stack_top();
  __ add(Rstack_top, Rstack_top, AsmOperand(Rcache, lsl, Interpreter::logStackElementSize));

  __ convert_retval_to_tos(state);

  __ check_and_handle_popframe();
  __ check_and_handle_earlyret();

  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

  // The stack is not extended by deopt but we must null last_sp as this
  // entry is like a "return".
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  // handle exceptions
  { Label L;
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
    __ cbz(Rtemp, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();

  switch (type) {
  case T_CHAR    : /* Nothing to do */  break;
  case T_BYTE    : /* Nothing to do */  break;
  case T_SHORT   : /* Nothing to do */  break;
  case T_INT     : /* Nothing to do */  break;
  case T_LONG    : /* Nothing to do */  break;
  case T_VOID    : /* Nothing to do */  break;
  case T_DOUBLE  : /* Nothing to do */  break;
  case T_FLOAT   : /* Nothing to do */  break;
  case T_BOOLEAN : __ c2bool(R0);       break;
  case T_OBJECT  :
    __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ verify_oop(R0);
    break;
  default        : __ should_not_reach_here(); break;
  }

  __ ret();
  return entry;
}
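
// A result handler is the final step of a native call: generate_native_entry()
// below invokes it via blx(Rresult_handler) to convert the C return value in
// R0/R1 (or, for T_OBJECT, the oop saved in the frame's oop_temp slot) into
// the Java tos value.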

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);

  // load current bytecode
  __ ldrb(R3_bytecode, Address(Rbcp));
  __ dispatch_only_normal(vtos);
  return entry;
}

address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
  return nullptr;
}


// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// In: Rmethod.
//
// Uses R0, R1, Rtemp.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  const Register Rcounters = Rtemp;
  const Address invocation_counter(Rcounters,
                MethodCounters::invocation_counter_offset() +
                InvocationCounter::counter_offset());

  // Note: In tiered compilation we increment either the counters in MethodCounters*
  // or in the MDO, depending on whether we're profiling or not.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
    __ cbz(R1_tmp, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(R1_tmp,
                  in_bytes(MethodData::invocation_counter_offset()) +
                  in_bytes(InvocationCounter::counter_offset()));
    const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
    __ b(done);
  }
  __ bind(no_mdo);
  __ get_method_counters(Rmethod, Rcounters, done);
  const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
  __ bind(done);
}
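
// increment_mask_and_jump() above implements, in pseudocode:
//
//   counter += InvocationCounter::count_increment;
//   if ((counter & mask) == 0) goto *overflow;   // the 'eq' condition
//
// i.e. the overflow branch is taken whenever the masked counter bits wrap to
// zero, which is roughly once the configured invocation threshold is crossed.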

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-null bcp).
  // The call returns the address of the verified entry point for the method or null
  // if the compilation did not complete (either went background or bailed out).
  __ mov(R1, (int)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);

  // jump to the interpreted entry.
  __ b(do_continue);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
  // Check if we've got enough room on the stack for
  //  - overhead;
  //  - locals;
  //  - expression stack.
  //
  // Registers on entry:
  //
  // R3 = number of additional locals
  // Rthread
  // Rmethod
  // Registers used: R0, R1, R2, Rtemp.

  const Register Radditional_locals = R3;
  const Register RmaxStack = R2;

  // monitor entry size
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  // Total overhead size: entry_size + (saved registers, through expr stack bottom).
  // Be sure to change this if you add/subtract anything to/from the overhead area.
  const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size;

  // Pages reserved for VM runtime calls and subsequent Java calls.
  const int reserved_pages = StackOverflow::stack_shadow_zone_size();

  // Thread::stack_size() includes guard pages, and they should not be touched.
  const int guard_pages = StackOverflow::stack_guard_zone_size();

  __ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
  __ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset()));
  __ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words());

  // reserve space for additional locals
  __ sub(Rtemp, Rtemp, AsmOperand(Radditional_locals, lsl, Interpreter::logStackElementSize));

  // stack size
  __ sub(R0, R0, R1);

  // reserve space for expression stack
  __ sub(Rtemp, Rtemp, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));

  __ cmp(Rtemp, R0);

  __ mov(SP, Rsender_sp, ls);  // restore SP
  __ b(SharedRuntime::throw_StackOverflowError_entry(), ls);
}
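
// The sequence above computes, in pseudocode:
//
//   limit  = stack_base - stack_size                      // lowest usable address
//   needed = SP - (overhead + shadow + guard + extra_stack_words)
//              - additional_locals * element_size
//              - max_stack * element_size
//   if (needed <= limit)   // unsigned compare, the 'ls' condition
//     throw StackOverflowError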

// Allocate monitor and lock method (asm interpreter)
//
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method

  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
  assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment");

  #ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbnz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method doesn't need synchronization");
      __ bind(L);
    }
  #endif // ASSERT

  // get synchronization object
  { Label done;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ tst(Rtemp, JVM_ACC_STATIC);
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case)
    __ b(done, eq);
    __ load_mirror(R0, Rmethod, Rtemp);
    __ bind(done);
  }

  // add space for monitor & lock

  __ sub(Rstack_top, Rstack_top, entry_size);  // add space for a monitor entry
  __ check_stack_top_on_expansion();
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                               // set new monitor block top
  __ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset()));
                                               // store object
  __ mov(R1, Rstack_top);                      // monitor entry address
  __ lock_object(R1);
}
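
// The monitor entry carved out above is a BasicObjectLock (roughly
// { BasicLock lock; oop obj; }), allocated just below the expression stack;
// interpreter_frame_monitor_block_top is updated so that stack walkers can
// find the locked object.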

//
// Generate a fixed interpreter frame. The setup is identical for interpreted
// and native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Generates the following stack layout:
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
  // [ last_sp            ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ push(LR);                                        // save return address
  __ push(FP);                                        // save FP
  __ mov(FP, SP);                                     // establish new FP

  __ push(Rsender_sp);

  __ mov(R0, 0);
  __ push(R0);                                        // leave last_sp as null

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, 0);                                  // bcp = 0 for native calls
  } else {
    __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); // get ConstMethod*
    __ add(Rbcp, Rtemp, ConstMethod::codes_offset()); // get codebase
  }

  __ push(Rmethod);                                    // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);
  __ push(Rtemp);

  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()), ne);
    __ push(Rtemp);                                    // set the mdp (method data pointer)
  } else {
    __ push(R0);
  }

  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset()));
  __ push(Rtemp);                                      // set constant pool cache
  __ sub(Rtemp, Rlocals, FP);
  __ logical_shift_right(Rtemp, Rtemp, Interpreter::logStackElementSize); // Rtemp = Rlocals - fp();
  __ push(Rtemp);                                      // set relativized Rlocals, see frame::interpreter_frame_locals()
  __ push(Rbcp);                                       // set bcp
  __ push(R0);                                         // reserve word for pointer to expression stack bottom
  __ str(SP, Address(SP, 0));                          // set expression stack bottom
}


// End of helpers

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions,
// but the frame layout they create is very similar. The other method entry
// types are special-purpose entries that combine entry and interpretation
// in one, for trivial methods like accessors, empty methods, or special
// math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds:
//
// Arguments:
//
// Rmethod: Method*
// Rthread: thread
// Rsender_sp:  sender sp
// Rparams (SP on 32-bit ARM): pointer to method parameters
//
// LR: return address
//
// Stack layout immediately at entry
//
// [ parameter n        ] <--- Rparams (SP on 32-bit ARM)
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects.
//
// local variables follow incoming parameters immediately; i.e.
// the return address is saved at the end of the locals.
//
// [ expr. stack        ] <--- Rstack_top (SP on 32-bit ARM)
// [ monitor entry      ]
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved Rbcp         ]
// [ current Rlocals    ]
// [ cache              ]
// [ mdx                ]
// [ mirror             ]
// [ Method*            ]
//
// 32-bit ARM:
// [ last_sp            ]
//
// [ sender_sp          ]
// [ saved FP           ] <--- FP
// [ saved LR           ]
// [ optional padding(*)]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- Rlocals
//

address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
  //   Thus we can use the regular method entry code to generate the NPE.
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
  // Rparams: parameters

  address entry = __ pc();
  Label slow_path;
  const Register Rthis = R0;
  const Register Rret_addr = Rtmp_save1;
  assert_different_registers(Rthis, Rret_addr, Rsender_sp);

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  // Check if local 0 != nullptr
  // If the receiver is null then it is OK to jump to the slow path.
  __ ldr(Rthis, Address(Rparams));
  __ cbz(Rthis, slow_path);

  // Preserve LR
  __ mov(Rret_addr, LR);

  // Load the value of the referent field.
  const Address field_address(Rthis, referent_offset);
  __ load_heap_oop(R0, field_address, Rtemp, R1_tmp, R2_tmp, ON_WEAK_OOP_REF);

  // _areturn
  __ mov(SP, Rsender_sp);
  __ ret(Rret_addr);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}
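
// In Java terms, the fast path above is roughly:
//
//   public T get() {
//     return this.referent;   // loaded with an ON_WEAK_OOP_REF barrier
//   }
//
// with a null receiver falling back to the regular (zerolocals) entry, which
// raises the NullPointerException.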

// Not supported
address TemplateInterpreterGenerator::generate_currentThread() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation
  const Register Rsize_of_params = R6;
  const Register Rsig_handler    = Rtmp_save0;   // R4
  const Register Rnative_code    = Rtmp_save1;   // R5
  const Register Rresult_handler = R6;

  const Register Rsaved_result_lo = Rtmp_save0;  // R4
  const Register Rsaved_result_hi = Rtmp_save1;  // R5
  FloatRegister saved_result_fp;

  __ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params,  Address(Rsize_of_params, ConstMethod::size_of_parameters_offset()));

  // Native calls don't need the stack size check since they have no expression
  // stack, the arguments are already on the stack, and we only add a handful
  // of words to the stack.

  // compute beginning of parameters (Rlocals)
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize));

  // reserve stack space for oop_temp
  __ mov(R0, 0);
  __ push(R0);

  generate_fixed_frame(true); // Note: R9 is now saved in the frame

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbnz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
      { Label L;
        __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
        __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
        __ stop("method needs synchronization");
        __ bind(L);
      }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter 3");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti/dtrace support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  {
    Label L;
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ cbnz(Rsig_handler, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1, true);
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  {
    Label L;
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ cbnz(Rnative_code, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1);
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // Allocate stack space for arguments

  // C functions need aligned stack
  __ bic(SP, SP, StackAlignmentInBytes - 1);
  // Multiply by BytesPerLong instead of BytesPerWord, because calling convention
  // may require empty slots due to long alignment, e.g. func(int, jlong, int, jlong)
  __ sub(SP, SP, AsmOperand(Rsize_of_params, lsl, LogBytesPerLong));

#ifdef __ABI_HARD__
  // Allocate more stack space to accommodate all GP as well as FP registers:
  // 4 * wordSize
  // 8 * BytesPerLong
  int reg_arguments = align_up((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
#else
  // Reserve at least 4 words on the stack for loading
  // parameters passed in registers (R0-R3).
  // See generate_slow_signature_handler().
  // It is also used for the JNIEnv & class additional parameters.
  int reg_arguments = 4 * wordSize;
#endif // __ABI_HARD__

  __ sub(SP, SP, reg_arguments);

  // Note: signature handler blows R4 besides all scratch registers.
  // See AbstractInterpreterGenerator::generate_slow_signature_handler().
  __ call(Rsig_handler);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ mov(Rresult_handler, R0);

  // Pass JNIEnv and mirror for static methods
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ add(R0, Rthread, in_bytes(JavaThread::jni_environment_offset()));
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, L);
    __ load_mirror(Rtemp, Rmethod, Rtemp);
    __ add(R1, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ str(Rtemp, Address(R1, 0));
    __ bind(L);
  }

  __ set_last_Java_frame(SP, FP, true, Rtemp);

  // Changing state to _thread_in_native must be the last thing to do
  // before the jump to native code. At this moment stack must be
  // safepoint-safe and completely prepared for stack walking.
#ifdef ASSERT
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
    __ cmp_32(Rtemp, _thread_in_Java);
    __ b(L, eq);
    __ stop("invalid thread state");
    __ bind(L);
  }
#endif

  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::StoreStore, Rtemp);

  __ mov(Rtemp, _thread_in_native);
  __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  __ call(Rnative_code);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // Set FPSCR/FPCR to a known state
  if (AlwaysRestoreFPU) {
    __ restore_default_fp_mode();
  }

  // Do safepoint check
  __ mov(Rtemp, _thread_in_native_trans);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ membar(MacroAssembler::StoreLoad, Rtemp);
  }

  // Protect the return value in the interleaved code: save it to callee-save registers.
  __ mov(Rsaved_result_lo, R0);
  __ mov(Rsaved_result_hi, R1);
#ifdef __ABI_HARD__
  // preserve native FP result in a callee-saved register
  saved_result_fp = D8;
  __ fcpyd(saved_result_fp, D0);
#else
  saved_result_fp = fnoreg;
#endif // __ABI_HARD__

  {
    Label call, skip_call;
    __ safepoint_poll(Rtemp, call);
    __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
    __ cmp(R3, 0);
    __ b(skip_call, eq);
    __ bind(call);
    __ mov(R0, Rthread);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none);
    __ bind(skip_call);

#if R9_IS_SCRATCHED
    __ restore_method();
#endif
  }

  // Perform Native->Java thread transition
  __ mov(Rtemp, _thread_in_Java);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
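
  // To summarize, the thread-state protocol around the native call is:
  //   _thread_in_Java         -> _thread_in_native        (before the native call)
  //   _thread_in_native       -> _thread_in_native_trans  (after the call; then poll)
  //   _thread_in_native_trans -> _thread_in_Java          (once the poll is clear)
  // with memory barriers ordering each state write against the surrounding accesses.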

  // Zero handles and last_java_sp
  __ reset_last_Java_frame(Rtemp);
  __ ldr(R3, Address(Rthread, JavaThread::active_handles_offset()));
  __ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset()));
  if (CheckJNICalls) {
    __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // Unbox oop result, e.g. JNIHandles::resolve result if it's an oop.
  {
    Label Lnot_oop;
    __ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT));
    __ cmp(Rtemp, Rresult_handler);
    __ b(Lnot_oop, ne);
    Register value = Rsaved_result_lo;
    __ resolve_jobject(value,   // value
                       Rtemp,   // tmp1
                       R1_tmp); // tmp2
    // Store resolved result in frame for GC visibility.
    __ str(value, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(Lnot_oop);
  }

  // reguard stack if StackOverflow exception happened while in native.
  {
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
    __ cmp_32(Rtemp, StackOverflow::stack_guard_yellow_reserved_disabled);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
  }

  // check pending exceptions
  {
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
    __ cmp(Rtemp, 0);
    __ mov(Rexception_pc, PC, ne);
    __ b(StubRoutines::forward_exception_entry(), ne);
  }

  if (synchronized) {
    // address of first monitor
    __ sub(R0, FP, - (frame::interpreter_frame_monitor_block_bottom_offset - frame::interpreter_frame_monitor_size()) * wordSize);
    __ unlock_object(R0);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp);

  // Restore the result. Oop result is restored from the stack by the
  // result handler.
  __ mov(R0, Rsaved_result_lo);
  __ mov(R1, Rsaved_result_hi);

#ifdef __ABI_HARD__
  // reload native FP result
  __ fcpyd(D0, D8);
#endif // __ABI_HARD__

  __ blx(Rresult_handler);

  // Restore FP/LR, sender_sp and return
  __ mov(Rtemp, FP);
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  const Register RconstMethod = R3;

  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));

  // setup Rlocals
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize));

  __ sub(R3, R3, R2); // number of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // allocate space for locals
  // explicitly initialize locals

  // Loop is unrolled 4 times
  Label loop;
  __ mov(R0, 0);
  __ bind(loop);

  // #1
  __ subs(R3, R3, 1);
  __ push(R0, ge);

  // #2
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #3
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #4
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  __ b(loop, gt);
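
  // Net effect of the unrolled loop: exactly R3 zero words are pushed (none
  // when R3 == 0), since each conditional 'subs'/'push' pair only executes
  // while the previous decrement has not gone negative.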

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  __ restore_dispatch();

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow);
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
      { Label L;
        __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
        __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
        __ stop("method needs synchronization");
        __ bind(L);
      }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter 4");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Rexception_obj: exception

  // Clear interpreter_frame_last_sp.
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));

#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();

  // expression stack is undefined here
  // Rexception_obj: exception
  // Rbcp: exception bcp
  __ verify_oop(Rexception_obj);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ mov(R1, Rexception_obj);
  __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), R1);
  // R0: exception handler entry point
  // Rexception_obj: preserved exception oop
  // Rbcp: bcp for exception handler
  __ push_ptr(Rexception_obj);                    // push exception which is now the only value on the stack
  __ jump(R0);                                    // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is removed and
  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).
1335 
1336   // In current activation
1337   // tos: exception
1338   // Rbcp: exception bcp
1339 
1340   //
1341   // JVMTI PopFrame support
1342   //
1343    Interpreter::_remove_activation_preserving_args_entry = __ pc();
1344 
1345 
1346   __ empty_expression_stack();
1347 
1348   // Set the popframe_processing bit in _popframe_condition indicating that we are
1349   // currently handling popframe, so that call_VMs that may happen later do not trigger new
1350   // popframe handling cycles.
1351 
1352   __ ldr_s32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1353   __ orr(Rtemp, Rtemp, (unsigned)JavaThread::popframe_processing_bit);
1354   __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1355 
1356   {
1357     // Check to see whether we are returning to a deoptimized frame.
1358     // (The PopFrame call ensures that the caller of the popped frame is
1359     // either interpreted or compiled and deoptimizes it if compiled.)
1360     // In this case, we can't call dispatch_next() after the frame is
1361     // popped, but instead must save the incoming arguments and restore
1362     // them after deoptimization has occurred.
1363     //
1364     // Note that we don't compare the return PC against the
1365     // deoptimization blob's unpack entry because of the presence of
1366     // adapter frames in C2.
1367     Label caller_not_deoptimized;
1368     __ ldr(R0, Address(FP, frame::return_addr_offset * wordSize));
1369     __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0);
1370     __ cbnz_32(R0, caller_not_deoptimized);
1371 
1372     // Compute size of arguments for saving when returning to deoptimized caller
1373     __ restore_method();
1374     __ ldr(R0, Address(Rmethod, Method::const_offset()));
1375     __ ldrh(R0, Address(R0, ConstMethod::size_of_parameters_offset()));
1376 
1377     __ logical_shift_left(R1, R0, Interpreter::logStackElementSize);
1378     // Save these arguments
1379     __ restore_locals();
1380     __ sub(R2, Rlocals, R1);
1381     __ add(R2, R2, wordSize);
1382     __ mov(R0, Rthread);
1383     __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R0, R1, R2);
1384 
1385     __ remove_activation(vtos, LR,
1386                          /* throw_monitor_exception */ false,
1387                          /* install_monitor_exception */ false,
1388                          /* notify_jvmdi */ false);
1389 
1390     // Inform deoptimization that it is responsible for restoring these arguments
1391     __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit);
1392     __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1393 
1394     // Continue in deoptimization handler
1395     __ ret();
1396 
1397     __ bind(caller_not_deoptimized);
1398   }
1399 
1400   __ remove_activation(vtos, R4,
1401                        /* throw_monitor_exception */ false,
1402                        /* install_monitor_exception */ false,
1403                        /* notify_jvmdi */ false);
1404 
1405   // Finish with popframe handling
1406   // A previous I2C followed by a deoptimization might have moved the
1407   // outgoing arguments further up the stack. PopFrame expects the
1408   // mutations to those outgoing arguments to be preserved and other
1409   // constraints basically require this frame to look exactly as
1410   // though it had previously invoked an interpreted activation with
1411   // no space between the top of the expression stack (current
1412   // last_sp) and the top of stack. Rather than force deopt to
1413   // maintain this kind of invariant all the time we call a small
1414   // fixup routine to move the mutated arguments onto the top of our
1415   // expression stack if necessary.
1416   __ mov(R1, SP);
1417   __ ldr(R2, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1418   // PC must point into interpreter here
1419   __ set_last_Java_frame(SP, FP, true, Rtemp);
1420   __ mov(R0, Rthread);
1421   __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2);
1422   __ reset_last_Java_frame(Rtemp);
1423 
1424   // Restore the last_sp and null it out
1425   __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1426   __ mov(Rtemp, (int)NULL_WORD);
1427   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1428 
1429   __ restore_bcp();
1430   __ restore_dispatch();
1431   __ restore_locals();
1432   __ restore_method();
1433 
1434   // The method data pointer was incremented already during
1435   // call profiling. We have to restore the mdp for the current bcp.
1436   if (ProfileInterpreter) {
1437     __ set_method_data_pointer_for_bcp();
1438   }
1439 
1440   // Clear the popframe condition flag
1441   assert(JavaThread::popframe_inactive == 0, "adjust this code");
1442   __ str_32(__ zero_register(Rtemp), Address(Rthread, JavaThread::popframe_condition_offset()));
1443 
1444 #if INCLUDE_JVMTI
1445   {
1446     Label L_done;
1447 
1448     __ ldrb(Rtemp, Address(Rbcp, 0));
1449     __ cmp(Rtemp, Bytecodes::_invokestatic);
1450     __ b(L_done, ne);
1451 
1452     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1453     // Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
1454 
1455     // get local0
1456     __ ldr(R1, Address(Rlocals, 0));
1457     __ mov(R2, Rmethod);
1458     __ mov(R3, Rbcp);
1459     __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R1, R2, R3);
1460 
1461     __ cbz(R0, L_done);
1462 
1463     __ str(R0, Address(Rstack_top));
1464     __ bind(L_done);
1465   }
1466 #endif // INCLUDE_JVMTI
1467 
1468   __ dispatch_next(vtos);
1469   // end of PopFrame support
1470 
1471   Interpreter::_remove_activation_entry = __ pc();
1472 
  // preserve the exception over this code sequence
  __ pop_ptr(R0_tos);
  __ str(R0_tos, Address(Rthread, JavaThread::vm_result_offset()));
  // remove the activation (without throwing IllegalMonitorStateException)
  __ remove_activation(vtos, Rexception_pc,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ true,
                       /* notify_jvmdi */ false);
  // restore the exception
  __ get_vm_result(Rexception_obj, Rtemp);

  // In between activations: the previous activation type is not yet known.
  // Compute the continuation point; the continuation point expects the
  // following registers to be set up:
  //
  // Rexception_obj: exception
  // Rexception_pc: return address/pc that threw the exception
  // SP: expression stack of caller
  // FP: frame pointer of caller
  __ mov(c_rarg0, Rthread);
  __ mov(c_rarg1, Rexception_pc);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);
  // Note that an "issuing PC" is actually the next PC after the call
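  // For reference, the leaf routine called above has roughly this shape:
  //   address SharedRuntime::exception_handler_for_return_address(JavaThread* thread,
  //                                                               address return_address);
  // It returns the continuation (the caller's handler entry) in R0.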

  __ jump(R0);                             // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  __ empty_expression_stack();

  __ load_earlyret_value(state);

  // Clear the earlyret state
  __ ldr(Rtemp, Address(Rthread, JavaThread::jvmti_thread_state_offset()));

  assert(JvmtiThreadState::earlyret_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(R2), Address(Rtemp, JvmtiThreadState::earlyret_state_offset()));

  __ remove_activation(state, LR,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */

  // According to the interpreter calling convention, the result is returned
  // in R0/R1, so ftos (S0) and dtos (D0) are moved to R0/R1.
  // This conversion must happen after remove_activation, because
  // remove_activation uses push(state) & pop(state) to preserve the return value.
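  // On a VFP-enabled build the conversion amounts to, roughly (a sketch,
  // not the exact generated code):
  //   ftos: vmov R0, S0        // 32-bit float  -> R0
  //   dtos: vmov R0, R1, D0    // 64-bit double -> R0:R1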
  __ convert_tos_to_retval(state);
  __ ret();

  return entry;
} // end of ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep, address& cep, address& sep,
                                                         address& aep, address& iep, address& lep,
                                                         address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

#ifdef __SOFTFP__
  dep = __ pc();                // fall through
#else
  fep = __ pc(); __ push(ftos); __ b(L);
  dep = __ pc(); __ push(dtos); __ b(L);
#endif // __SOFTFP__

  lep = __ pc(); __ push(ltos); __ b(L);

  if (VerifyOops) {  // can't share atos entry if VerifyOops
    aep = __ pc(); __ push(atos); __ b(L);
  } else {
    aep = __ pc();              // fall through
  }

#ifdef __SOFTFP__
  fep = __ pc();                // fall through
#endif // __SOFTFP__

  bep = cep = sep =             // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}
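
// Example of the resulting dispatch behavior: when a vtos-only template is
// reached with an int cached in R0_tos (itos), dispatch enters through iep,
// which pushes the cached int onto the expression stack and falls through
// to vep, so generate_and_dispatch() always starts with an empty tos cache.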

//------------------------------------------------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state);       // save tosca

  // pass tosca registers as arguments
  __ mov(R2, R0_tos);
  __ mov(R3, R1_tos_hi);
  __ mov(R1, LR);       // save return address

  // call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);

  __ mov(LR, R0);       // restore return address
  __ pop(state);        // restore tosca

  // return
  __ ret();

  return entry;
}
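
// This stub backs the develop flag TraceBytecodes; on a debug build, for
// example:
//   java -XX:+TraceBytecodes -version
// prints roughly one trace line per executed bytecode.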


void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register Rindex_addr = R2_tmp;
  Label Lcontinue;
  InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);
  InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
  const Register Rcounters_addr = R2_tmp;
  const Register Rindex = R4_tmp;

  // calculate new index for counter:
  // index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes).
  // (_index >> log2_number_of_codes) is the previous bytecode
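  //
  // For example, with log2_number_of_codes == 8 (its value in
  // bytecodeHistogram.hpp) the new index is
  //   (current_bytecode << 8) | previous_bytecode
  // so each (previous, current) bytecode pair selects its own counter slot.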

  __ ldr_literal(Rindex_addr, Lindex);
  __ ldr_s32(Rindex, Address(Rindex_addr));
  __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
  __ str_32(Rindex, Address(Rindex_addr));

  // Rindex (R4) contains index of counter

  __ ldr_literal(Rcounters_addr, Lcounters);
  __ ldr_s32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
  __ adds_32(Rtemp, Rtemp, 1);
  __ b(Lcontinue, mi);                           // avoid overflow
  __ str_32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));

  __ b(Lcontinue);

  __ bind_literal(Lindex);
  __ bind_literal(Lcounters);

  __ bind(Lcontinue);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid code-size blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  assert(Interpreter::trace_code(t->tos_in()) != nullptr,
         "entry must have been generated");
  address trace_entry = Interpreter::trace_code(t->tos_in());
  __ call(trace_entry, relocInfo::none);
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label Lcontinue;
  const Register stop_at = R2_tmp;

  __ ldr_global_s32(Rtemp, (address) &BytecodeCounter::_counter_value);
  __ mov_slow(stop_at, StopInterpreterAt);

  // test bytecode counter
  __ cmp(Rtemp, stop_at);
  __ b(Lcontinue, ne);

  __ trace_state("stop_interpreter_at");
  __ breakpoint();

  __ bind(Lcontinue);
}
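
// Typical use, on a debug build: the develop flag StopInterpreterAt makes
// the VM hit the breakpoint above once the global bytecode counter reaches
// the given value, e.g.
//   java -XX:StopInterpreterAt=1000000 -version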
#endif // !PRODUCT