1 /*
   2  * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "classfile/javaClasses.hpp"
  29 #include "interpreter/bytecodeHistogram.hpp"
  30 #include "interpreter/interp_masm.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "interpreter/interpreterRuntime.hpp"
  33 #include "interpreter/templateInterpreterGenerator.hpp"
  34 #include "interpreter/templateTable.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/methodData.hpp"
  37 #include "oops/method.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "oops/resolvedIndyEntry.hpp"
  40 #include "oops/resolvedMethodEntry.hpp"
  41 #include "prims/jvmtiExport.hpp"
  42 #include "prims/jvmtiThreadState.hpp"
  43 #include "prims/methodHandles.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/deoptimization.hpp"
  46 #include "runtime/frame.inline.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "runtime/stubRoutines.hpp"
  50 #include "runtime/synchronizer.hpp"
  51 #include "runtime/timer.hpp"
  52 #include "runtime/vframeArray.hpp"
  53 #include "utilities/align.hpp"
  54 #include "utilities/debug.hpp"
  55 #include "utilities/macros.hpp"
  56 
// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI.
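// For example (PrintInterpreter is a diagnostic flag, so it usually needs to be
// unlocked first):
//   java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version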
  62 int TemplateInterpreter::InterpreterCodeSize = 180 * 1024;
  63 
  64 #define __ _masm->
  65 
  66 //------------------------------------------------------------------------------------------------------------------------
  67 
  68 address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  69   address entry = __ pc();
  70 
  71   // callee-save register for saving LR, shared with generate_native_entry
  72   const Register Rsaved_ret_addr = Rtmp_save0;
  73 
  74   __ mov(Rsaved_ret_addr, LR);
  75 
  76   __ mov(R1, Rmethod);
  77   __ mov(R2, Rlocals);
  78   __ mov(R3, SP);
  79 
  80 
  81   // Safer to save R9 (when scratched) since callers may have been
  82   // written assuming R9 survives. This is suboptimal but
  83   // probably not important for this slow case call site.
  84   // Note for R9 saving: slow_signature_handler may copy register
  85   // arguments above the current SP (passed as R3). It is safe for
  86   // call_VM to use push and pop to protect additional values on the
  87   // stack if needed.
  88   __ call_VM(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), true /* save R9 if needed*/);
  89   __ add(SP, SP, wordSize);     // Skip R0
  90   __ pop(RegisterSet(R1, R3));  // Load arguments passed in registers
  91 #ifdef __ABI_HARD__
  // A few alternatives to the always-load-FP-registers approach:
  // - parse the method signature to detect FP arguments
  // - keep a counter/flag on the stack indicating the number of FP arguments in the method.
  // The latter was originally implemented and tested, but the conditional path could
  // eliminate any gain obtained by avoiding the 8 double-word loads.
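  // Load the eight double FP argument registers (D0..D7) from the area that
  // slow_signature_handler filled in above; the writeback pops that area off the stack.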
  97   __ fldmiad(SP, FloatRegisterSet(D0, 8), writeback);
  98 #endif // __ABI_HARD__
  99 
 100   __ ret(Rsaved_ret_addr);
 101 
 102   return entry;
 103 }
 104 
 105 
 106 //
// Various method entries (that the C++ and asm interpreters agree upon)
 108 //------------------------------------------------------------------------------------------------------------------------
 109 //
 110 //
 111 
// Abstract method entry
// Attempt to execute an abstract method. Throws an AbstractMethodError.
 114 address TemplateInterpreterGenerator::generate_abstract_entry(void) {
 115   address entry_point = __ pc();
 116 
 117 
 118   __ empty_expression_stack();
 119 
 120   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
 121 
 122   DEBUG_ONLY(STOP("generate_abstract_entry");) // Should not reach here
 123   return entry_point;
 124 }
 125 
 126 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
 127   address entry_point = nullptr;
 128   Register continuation = LR;
 129   bool use_runtime_call = false;
 130   switch (kind) {
 131   case Interpreter::java_lang_math_abs:
 132     entry_point = __ pc();
 133 #ifdef __SOFTFP__
 134     use_runtime_call = true;
 135     __ ldrd(R0, Address(SP));
 136 #else // !__SOFTFP__
 137     __ ldr_double(D0, Address(SP));
 138     __ abs_double(D0, D0);
 139 #endif // __SOFTFP__
 140     break;
 141   case Interpreter::java_lang_math_sqrt:
 142     entry_point = __ pc();
 143 #ifdef __SOFTFP__
 144     use_runtime_call = true;
 145     __ ldrd(R0, Address(SP));
 146 #else // !__SOFTFP__
 147     __ ldr_double(D0, Address(SP));
 148     __ sqrt_double(D0, D0);
 149 #endif // __SOFTFP__
 150     break;
 151   case Interpreter::java_lang_math_sin:
 152   case Interpreter::java_lang_math_cos:
 153   case Interpreter::java_lang_math_tan:
 154   case Interpreter::java_lang_math_log:
 155   case Interpreter::java_lang_math_log10:
 156   case Interpreter::java_lang_math_exp:
 157     entry_point = __ pc();
 158     use_runtime_call = true;
 159 #ifdef __SOFTFP__
 160     __ ldrd(R0, Address(SP));
 161 #else // !__SOFTFP__
 162     __ ldr_double(D0, Address(SP));
 163 #endif // __SOFTFP__
 164     break;
 165   case Interpreter::java_lang_math_pow:
 166     entry_point = __ pc();
 167     use_runtime_call = true;
 168 #ifdef __SOFTFP__
 169     __ ldrd(R0, Address(SP, 2 * Interpreter::stackElementSize));
 170     __ ldrd(R2, Address(SP));
 171 #else // !__SOFTFP__
 172     __ ldr_double(D0, Address(SP, 2 * Interpreter::stackElementSize));
 173     __ ldr_double(D1, Address(SP));
 174 #endif // __SOFTFP__
 175     break;
 176   case Interpreter::java_lang_math_fmaD:
 177   case Interpreter::java_lang_math_fmaF:
 178     // TODO: Implement intrinsic
 179     break;
 180   default:
 181     ShouldNotReachHere();
 182   }
 183 
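  // If an entry was generated above: drop the incoming argument area (SP := Rsender_sp),
  // call the runtime helper if the operation is not implemented inline, and return the
  // result to the caller in D0 (or R0:R1 with __SOFTFP__).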
 184   if (entry_point != nullptr) {
 185     __ mov(SP, Rsender_sp);
 186     if (use_runtime_call) {
 187       __ mov(Rtmp_save0, LR);
 188       continuation = Rtmp_save0;
 189       generate_math_runtime_call(kind);
 190     }
 191     __ ret(continuation);
 192   }
 193   return entry_point;
 194 }
 195 
 196 void TemplateInterpreterGenerator::generate_math_runtime_call(AbstractInterpreter::MethodKind kind) {
 197   address fn;
 198   switch (kind) {
 199 #ifdef __SOFTFP__
 200   case Interpreter::java_lang_math_abs:
 201     fn = CAST_FROM_FN_PTR(address, SharedRuntime::dabs);
 202     break;
 203   case Interpreter::java_lang_math_sqrt:
 204     fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
 205     break;
 206 #endif // __SOFTFP__
 207   case Interpreter::java_lang_math_sin:
 208     fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
 209     break;
 210   case Interpreter::java_lang_math_cos:
 211     fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
 212     break;
 213   case Interpreter::java_lang_math_tan:
 214     fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
 215     break;
 216   case Interpreter::java_lang_math_log:
 217     fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
 218     break;
 219   case Interpreter::java_lang_math_log10:
 220     fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
 221     break;
 222   case Interpreter::java_lang_math_exp:
 223     fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
 224     break;
 225   case Interpreter::java_lang_math_pow:
 226     fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
 227     break;
 228   default:
 229     ShouldNotReachHere();
 230     fn = nullptr; // silence "maybe uninitialized" compiler warnings
 231   }
 232   __ call_VM_leaf(fn);
 233 }
 234 
 235 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
 236   address entry = __ pc();
 237 
 238   // Note: There should be a minimal interpreter frame set up when stack
 239   // overflow occurs since we check explicitly for it now.
 240   //
 241 #ifdef ASSERT
 242   { Label L;
 243     __ sub(Rtemp, FP, - frame::interpreter_frame_monitor_block_top_offset * wordSize);
 244     __ cmp(SP, Rtemp);  // Rtemp = maximal SP for current FP,
 245                         //  (stack grows negative)
 246     __ b(L, ls); // check if frame is complete
 247     __ stop ("interpreter frame not set up");
 248     __ bind(L);
 249   }
 250 #endif // ASSERT
 251 
 252   // Restore bcp under the assumption that the current frame is still
 253   // interpreted
 254   __ restore_bcp();
 255 
 256   // expression stack must be empty before entering the VM if an exception
 257   // happened
 258   __ empty_expression_stack();
 259 
 260   // throw exception
 261   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
 262 
 263   __ should_not_reach_here();
 264 
 265   return entry;
 266 }
 267 
 268 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
 269   address entry = __ pc();
 270 
 271   // index is in R4_ArrayIndexOutOfBounds_index
 272 
 273   // expression stack must be empty before entering the VM if an exception happened
 274   __ empty_expression_stack();
 275 
 276   // setup parameters
 277   // Array expected in R1.
 278   __ mov(R2, R4_ArrayIndexOutOfBounds_index);
 279 
 280   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2);
 281 
 282   __ nop(); // to avoid filling CPU pipeline with invalid instructions
 283   __ nop();
 284   __ should_not_reach_here();
 285 
 286   return entry;
 287 }
 288 
 289 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
 290   address entry = __ pc();
 291 
 292   // object is in R2_ClassCastException_obj
 293 
 294   // expression stack must be empty before entering the VM if an exception
 295   // happened
 296   __ empty_expression_stack();
 297 
 298   __ mov(R1, R2_ClassCastException_obj);
 299   __ call_VM(noreg,
 300              CAST_FROM_FN_PTR(address,
 301                               InterpreterRuntime::throw_ClassCastException),
 302              R1);
 303 
 304   __ should_not_reach_here();
 305 
 306   return entry;
 307 }
 308 
 309 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
 310   assert(!pass_oop || message == nullptr, "either oop or message but not both");
 311   address entry = __ pc();
 312 
 313   InlinedString Lname(name);
 314   InlinedString Lmessage(message);
 315 
 316   if (pass_oop) {
 317     // object is at TOS
 318     __ pop_ptr(R2);
 319   }
 320 
 321   // expression stack must be empty before entering the VM if an exception happened
 322   __ empty_expression_stack();
 323 
 324   // setup parameters
 325   __ ldr_literal(R1, Lname);
 326 
 327   if (pass_oop) {
 328     __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2);
 329   } else {
 330     if (message != nullptr) {
 331       __ ldr_literal(R2, Lmessage);
 332     } else {
 333       __ mov(R2, 0);
 334     }
 335     __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), R1, R2);
 336   }
 337 
 338   // throw exception
 339   __ b(Interpreter::throw_exception_entry());
 340 
 341   __ nop(); // to avoid filling CPU pipeline with invalid instructions
 342   __ nop();
 343   __ bind_literal(Lname);
 344   if (!pass_oop && (message != nullptr)) {
 345     __ bind_literal(Lmessage);
 346   }
 347 
 348   return entry;
 349 }
 350 
 351 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
 352   address entry = __ pc();
 353 
 354   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
 355 
  // Restore the stack bottom in case i2c adjusted the stack
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // and null it as a marker that SP is now tos until the next Java call
 359   __ mov(Rtemp, (int)NULL_WORD);
 360   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
 361 
 362   __ restore_method();
 363   __ restore_bcp();
 364   __ restore_dispatch();
 365   __ restore_locals();
 366 
 367   const Register Rcache = R2_tmp;
 368   const Register Rindex = R3_tmp;
 369 
 370   if (index_size == sizeof(u4)) {
 371     __ load_resolved_indy_entry(Rcache, Rindex);
 372     __ ldrh(Rcache, Address(Rcache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
 373   } else {
 374     // Pop N words from the stack
 375     assert(index_size == sizeof(u2), "Can only be u2");
 376     __ load_method_entry(Rcache, Rindex);
 377     __ ldrh(Rcache, Address(Rcache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
 378   }
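  // Rcache now holds the callee's parameter count; the arguments that the caller
  // pushed for the call are popped from the expression stack below.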
 379 
 380   __ check_stack_top();
 381   __ add(Rstack_top, Rstack_top, AsmOperand(Rcache, lsl, Interpreter::logStackElementSize));
 382 
 383   __ convert_retval_to_tos(state);
 384 
 385   __ check_and_handle_popframe();
 386   __ check_and_handle_earlyret();
 387 
 388   __ dispatch_next(state, step);
 389 
 390   return entry;
 391 }
 392 
 393 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
 394   address entry = __ pc();
 395 
 396   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
 397 
 398   // The stack is not extended by deopt but we must null last_sp as this
 399   // entry is like a "return".
 400   __ mov(Rtemp, 0);
 401   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
 402 
 403   __ restore_method();
 404   __ restore_bcp();
 405   __ restore_dispatch();
 406   __ restore_locals();
 407 
 408   // handle exceptions
 409   { Label L;
 410     __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
 411     __ cbz(Rtemp, L);
 412     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
 413     __ should_not_reach_here();
 414     __ bind(L);
 415   }
 416 
 417   if (continuation == nullptr) {
 418     __ dispatch_next(state, step);
 419   } else {
 420     __ jump_to_entry(continuation);
 421   }
 422 
 423   return entry;
 424 }
 425 
 426 address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
 427   address entry = __ pc();
 428 
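  // Convert the raw call result into the form the interpreter expects: booleans are
  // normalized to 0/1, and object results are reloaded from the frame's oop_temp slot
  // (where the native-entry code stored them for GC visibility) and verified.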
 429   switch (type) {
 430   case T_CHAR    : /* Nothing to do */  break;
 431   case T_BYTE    : /* Nothing to do */  break;
 432   case T_SHORT   : /* Nothing to do */  break;
 433   case T_INT     : /* Nothing to do */  break;
 434   case T_LONG    : /* Nothing to do */  break;
 435   case T_VOID    : /* Nothing to do */  break;
 436   case T_DOUBLE  : /* Nothing to do */  break;
 437   case T_FLOAT   : /* Nothing to do */  break;
 438   case T_BOOLEAN : __ c2bool(R0);       break;
 439   case T_OBJECT  :
 440     __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
 441     __ verify_oop(R0);
 442     break;
 443   default        : __ should_not_reach_here(); break;
 444   }
 445 
 446   __ ret();
 447   return entry;
 448 }
 449 
 450 address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
 451   address entry = __ pc();
 452   __ push(state);
 453   __ call_VM(noreg, runtime_entry);
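  // The TOS value was flushed to the expression stack by push(state) above; after the
  // runtime call the current (not yet executed) bytecode is re-dispatched from the
  // normal table, and its template will pop the value again as needed.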
 454 
 455   // load current bytecode
 456   __ ldrb(R3_bytecode, Address(Rbcp));
 457   __ dispatch_only_normal(vtos);
 458   return entry;
 459 }
 460 
 461 address TemplateInterpreterGenerator::generate_cont_preempt_rerun_interpreter_adapter() {
 462   return nullptr;
 463 }
 464 
 465 
// Helpers for commoning out cases in the various types of method entries.
 467 //
 468 
 469 // increment invocation count & check for overflow
 470 //
 471 // Note: checking for negative value instead of overflow
 472 //       so we have a 'sticky' overflow test
 473 //
 474 // In: Rmethod.
 475 //
 476 // Uses R0, R1, Rtemp.
 477 //
 478 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
 479   Label done;
 480   const Register Rcounters = Rtemp;
 481   const Address invocation_counter(Rcounters,
 482                 MethodCounters::invocation_counter_offset() +
 483                 InvocationCounter::counter_offset());
 484 
  // Note: In tiered compilation we increment the counters either in the MethodCounters*
  // or in the MDO, depending on whether we're profiling or not.
 487   int increment = InvocationCounter::count_increment;
 488   Label no_mdo;
 489   if (ProfileInterpreter) {
 490     // Are we profiling?
 491     __ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
 492     __ cbz(R1_tmp, no_mdo);
 493     // Increment counter in the MDO
 494     const Address mdo_invocation_counter(R1_tmp,
 495                   in_bytes(MethodData::invocation_counter_offset()) +
 496                   in_bytes(InvocationCounter::counter_offset()));
 497     const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
 498     __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
 499     __ b(done);
 500   }
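  // Not profiling (or no MDO allocated yet): bump the invocation counter in the
  // MethodCounters* instead.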
 501   __ bind(no_mdo);
 502   __ get_method_counters(Rmethod, Rcounters, done);
 503   const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
 504   __ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
 505   __ bind(done);
 506 }
 507 
 508 void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
 509   // InterpreterRuntime::frequency_counter_overflow takes one argument
 510   // indicating if the counter overflow occurs at a backwards branch (non-null bcp).
 511   // The call returns the address of the verified entry point for the method or null
 512   // if the compilation did not complete (either went background or bailed out).
 513   __ mov(R1, (int)false);
 514   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
 515 
 516   // jump to the interpreted entry.
 517   __ b(do_continue);
 518 }
 519 
 520 void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
 521   // Check if we've got enough room on the stack for
 522   //  - overhead;
 523   //  - locals;
 524   //  - expression stack.
 525   //
 526   // Registers on entry:
 527   //
 528   // R3 = number of additional locals
 529   // Rthread
 530   // Rmethod
 531   // Registers used: R0, R1, R2, Rtemp.
 532 
 533   const Register Radditional_locals = R3;
 534   const Register RmaxStack = R2;
 535 
 536   // monitor entry size
 537   const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
 538 
  // total overhead size: entry_size + (saved registers, through expr stack bottom).
 540   // be sure to change this if you add/subtract anything to/from the overhead area
 541   const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size;
 542 
 543   // Pages reserved for VM runtime calls and subsequent Java calls.
 544   const int reserved_pages = StackOverflow::stack_shadow_zone_size();
 545 
 546   // Thread::stack_size() includes guard pages, and they should not be touched.
 547   const int guard_pages = StackOverflow::stack_guard_zone_size();
 548 
 549   __ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
 550   __ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
 551   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
 552   __ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset()));
 553   __ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words());
 554 
 555   // reserve space for additional locals
 556   __ sub(Rtemp, Rtemp, AsmOperand(Radditional_locals, lsl, Interpreter::logStackElementSize));
 557 
 558   // stack size
 559   __ sub(R0, R0, R1);
 560 
 561   // reserve space for expression stack
 562   __ sub(Rtemp, Rtemp, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
 563 
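  // Rtemp now holds the lowest SP value this activation may need; R0 holds the stack
  // limit (stack base minus stack size). If Rtemp does not stay above the limit,
  // restore the caller's SP and throw StackOverflowError.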
 564   __ cmp(Rtemp, R0);
 565 
 566   __ mov(SP, Rsender_sp, ls);  // restore SP
 567   __ b(StubRoutines::throw_StackOverflowError_entry(), ls);
 568 }
 569 
 570 
 571 // Allocate monitor and lock method (asm interpreter)
 572 //
 573 void TemplateInterpreterGenerator::lock_method() {
 574   // synchronize method
 575 
 576   const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
 577   assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment");
 578 
 579   #ifdef ASSERT
 580     { Label L;
 581       __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
 582       __ tbnz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
 583       __ stop("method doesn't need synchronization");
 584       __ bind(L);
 585     }
 586   #endif // ASSERT
 587 
 588   // get synchronization object
 589   { Label done;
 590     __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
 591     __ tst(Rtemp, JVM_ACC_STATIC);
 592     __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case)
 593     __ b(done, eq);
 594     __ load_mirror(R0, Rmethod, Rtemp);
 595     __ bind(done);
 596   }
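  // R0 now holds the object to lock: the receiver for non-static methods, or the
  // class mirror for static methods.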
 597 
 598   // add space for monitor & lock
 599 
 600 
 601   __ sub(Rstack_top, Rstack_top, entry_size);
 602   __ check_stack_top_on_expansion();
 603                                               // add space for a monitor entry
 604   __ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
 605                                               // set new monitor block top
 606   __ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset()));
 607                                               // store object
 608   __ mov(R1, Rstack_top);                     // monitor entry address
 609   __ lock_object(R1);
 610 }
 611 
 612 
 613 //
// Generate a fixed interpreter frame. The setup is identical for interpreted methods
// and for native methods, hence the shared code.
 616 
 617 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
 618   // Generates the following stack layout:
 619   //
 620   // [ expr. stack bottom ]
 621   // [ saved Rbcp         ]
 622   // [ current Rlocals    ]
 623   // [ cache              ]
 624   // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
 626   // [ last_sp            ]
 627   // [ sender_sp          ]
 628   // [ saved FP           ] <--- FP
 629   // [ saved LR           ]
 630 
 631   // initialize fixed part of activation frame
 632   __ push(LR);                                        // save return address
 633   __ push(FP);                                        // save FP
 634   __ mov(FP, SP);                                     // establish new FP
 635 
 636   __ push(Rsender_sp);
 637 
 638   __ mov(R0, 0);
 639   __ push(R0);                                        // leave last_sp as null
 640 
 641   // setup Rbcp
 642   if (native_call) {
 643     __ mov(Rbcp, 0);                                  // bcp = 0 for native calls
 644   } else {
 645     __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); // get ConstMethod*
 646     __ add(Rbcp, Rtemp, ConstMethod::codes_offset()); // get codebase
 647   }
 648 
 649   __ push(Rmethod);                                    // save Method*
 650   // Get mirror and store it in the frame as GC root for this Method*
 651   __ load_mirror(Rtemp, Rmethod, Rtemp);
 652   __ push(Rtemp);
 653 
 654   if (ProfileInterpreter) {
 655     __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
 656     __ tst(Rtemp, Rtemp);
 657     __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()), ne);
 658     __ push(Rtemp);                                    // set the mdp (method data pointer)
 659   } else {
 660     __ push(R0);
 661   }
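  // Note: when not profiling, R0 is still zero here, so a null mdx is pushed; with
  // profiling the slot also stays null until a MethodData* has been allocated.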
 662 
 663   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
 664   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
 665   __ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset()));
 666   __ push(Rtemp);                                      // set constant pool cache
 667   __ sub(Rtemp, Rlocals, FP);
 668   __ logical_shift_right(Rtemp, Rtemp, Interpreter::logStackElementSize); // Rtemp = Rlocals - fp();
 669   __ push(Rtemp);                                      // set relativized Rlocals, see frame::interpreter_frame_locals()
 670   __ push(Rbcp);                                       // set bcp
 671   __ push(R0);                                         // reserve word for pointer to expression stack bottom
 672   __ str(SP, Address(SP, 0));                          // set expression stack bottom
 673 }
 674 
 675 
 676 // End of helpers
 677 
 678 //------------------------------------------------------------------------------------------------------------------------
 679 // Entry points
 680 //
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// Both come in synchronized and non-synchronized versions, but the
// frame layout they create is very similar. The other method entry
// types are really just special-purpose entries that combine entry
// and interpretation in one. These are for trivial methods like
// accessor, empty, or special math methods.
 688 //
 689 // When control flow reaches any of the entry types for the interpreter
 690 // the following holds ->
 691 //
 692 // Arguments:
 693 //
 694 // Rmethod: Method*
 695 // Rthread: thread
 696 // Rsender_sp:  sender sp
 697 // Rparams (SP on 32-bit ARM): pointer to method parameters
 698 //
 699 // LR: return address
 700 //
 701 // Stack layout immediately at entry
 702 //
 703 // [ parameter n        ] <--- Rparams (SP on 32-bit ARM)
 704 //   ...
 705 // [ parameter 1        ]
 706 // [ expression stack   ] (caller's java expression stack)
 707 
 708 // Assuming that we don't go to one of the trivial specialized
 709 // entries the stack will look like below when we are ready to execute
 710 // the first bytecode (or call the native routine). The register usage
 711 // will be as the template based interpreter expects.
 712 //
 713 // local variables follow incoming parameters immediately; i.e.
 714 // the return address is saved at the end of the locals.
 715 //
 716 // [ expr. stack        ] <--- Rstack_top (SP on 32-bit ARM)
 717 // [ monitor entry      ]
 718 //   ...
 719 // [ monitor entry      ]
 720 // [ expr. stack bottom ]
 721 // [ saved Rbcp         ]
 722 // [ current Rlocals    ]
 723 // [ cache              ]
 724 // [ mdx                ]
 725 // [ mirror             ]
 726 // [ Method*            ]
 727 //
 728 // 32-bit ARM:
 729 // [ last_sp            ]
 730 //
 731 // [ sender_sp          ]
 732 // [ saved FP           ] <--- FP
 733 // [ saved LR           ]
 734 // [ optional padding(*)]
 735 // [ local variable m   ]
 736 //   ...
 737 // [ local variable 1   ]
 738 // [ parameter n        ]
 739 //   ...
 740 // [ parameter 1        ] <--- Rlocals
 741 //
 742 
 743 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
 744   // Code: _aload_0, _getfield, _areturn
 745   // parameter size = 1
 746   //
 747   // The code that gets generated by this routine is split into 2 parts:
 748   //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
 749   //    2. The slow path - which is an expansion of the regular method entry.
 750   //
  // Notes:
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load;
  //   thus we can use the regular method entry code to generate the NPE.
 756   //
 757   // Rmethod: Method*
 758   // Rthread: thread
 759   // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
 760   // Rparams: parameters
 761 
 762   address entry = __ pc();
 763   Label slow_path;
 764   const Register Rthis = R0;
 765   const Register Rret_addr = Rtmp_save1;
 766   assert_different_registers(Rthis, Rret_addr, Rsender_sp);
 767 
 768   const int referent_offset = java_lang_ref_Reference::referent_offset();
 769 
 770   // Check if local 0 != nullptr
 771   // If the receiver is null then it is OK to jump to the slow path.
 772   __ ldr(Rthis, Address(Rparams));
 773   __ cbz(Rthis, slow_path);
 774 
 775   // Preserve LR
 776   __ mov(Rret_addr, LR);
 777 
 778   // Load the value of the referent field.
 779   const Address field_address(Rthis, referent_offset);
 780   __ load_heap_oop(R0, field_address, Rtemp, R1_tmp, R2_tmp, ON_WEAK_OOP_REF);
 781 
 782   // _areturn
 783   __ mov(SP, Rsender_sp);
 784   __ ret(Rret_addr);
 785 
 786   // generate a vanilla interpreter entry as the slow path
 787   __ bind(slow_path);
 788   __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
 789   return entry;
 790 }
 791 
 792 // Not supported
 793 address TemplateInterpreterGenerator::generate_currentThread() { return nullptr; }
 794 address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return nullptr; }
 795 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
 796 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
 797 address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() { return nullptr; }
 798 address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() { return nullptr; }
 799 address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() { return nullptr; }
 800 address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() { return nullptr; }
 801 address TemplateInterpreterGenerator::generate_Float_float16ToFloat_entry() { return nullptr; }
 802 address TemplateInterpreterGenerator::generate_Float_floatToFloat16_entry() { return nullptr; }
 803 
 804 //
 805 // Interpreter stub for calling a native method. (asm interpreter)
 806 // This sets up a somewhat different looking stack for calling the native method
 807 // than the typical interpreter frame setup.
 808 //
 809 
 810 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
 811   // determine code generation flags
 812   bool inc_counter  = UseCompiler || CountCompiledCalls;
 813 
 814   // Incoming registers:
 815   //
 816   // Rmethod: Method*
 817   // Rthread: thread
 818   // Rsender_sp: sender sp
 819   // Rparams: parameters
 820 
 821   address entry_point = __ pc();
 822 
 823   // Register allocation
 824   const Register Rsize_of_params = R6;
 825   const Register Rsig_handler    = Rtmp_save0;   // R4
 826   const Register Rnative_code    = Rtmp_save1;   // R5
 827   const Register Rresult_handler = R6;
 828 
 829   const Register Rsaved_result_lo = Rtmp_save0;  // R4
 830   const Register Rsaved_result_hi = Rtmp_save1;  // R5
 831   FloatRegister saved_result_fp;
 832 
 833 
 834   __ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset()));
 835   __ ldrh(Rsize_of_params,  Address(Rsize_of_params, ConstMethod::size_of_parameters_offset()));
 836 
  // Native calls don't need the stack size check since they have no expression stack;
  // the arguments are already on the stack, and we only add a handful of words
  // to the stack.
 840 
 841   // compute beginning of parameters (Rlocals)
 842   __ sub(Rlocals, Rparams, wordSize);
 843   __ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize));
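  // Rlocals now points at local 0 (the receiver or first argument), i.e. the
  // highest-addressed slot of the incoming parameter area.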
 844 
 845   // reserve stack space for oop_temp
 846   __ mov(R0, 0);
 847   __ push(R0);
 848 
 849   generate_fixed_frame(true); // Note: R9 is now saved in the frame
 850 
 851   // make sure method is native & not abstract
 852 #ifdef ASSERT
 853   __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
 854   {
 855     Label L;
 856     __ tbnz(Rtemp, JVM_ACC_NATIVE_BIT, L);
 857     __ stop("tried to execute non-native method as native");
 858     __ bind(L);
 859   }
 860   { Label L;
 861     __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
 862     __ stop("tried to execute abstract method in interpreter");
 863     __ bind(L);
 864   }
 865 #endif
 866 
 867   // increment invocation count & check for overflow
 868   Label invocation_counter_overflow;
 869   if (inc_counter) {
 870     if (synchronized) {
 871       // Avoid unlocking method's monitor in case of exception, as it has not
 872       // been locked yet.
 873       __ set_do_not_unlock_if_synchronized(true, Rtemp);
 874     }
 875     generate_counter_incr(&invocation_counter_overflow);
 876   }
 877 
 878   Label continue_after_compile;
 879   __ bind(continue_after_compile);
 880 
 881   if (inc_counter && synchronized) {
 882     __ set_do_not_unlock_if_synchronized(false, Rtemp);
 883   }
 884 
 885   // check for synchronized methods
  // Must happen AFTER the invocation_counter check and the stack overflow check,
  // so the method is not locked if the counter overflows.
 888   //
 889   if (synchronized) {
 890     lock_method();
 891   } else {
 892     // no synchronization necessary
 893 #ifdef ASSERT
 894       { Label L;
 895         __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
 896         __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
 897         __ stop("method needs synchronization");
 898         __ bind(L);
 899       }
 900 #endif
 901   }
 902 
 903   // start execution
 904 #ifdef ASSERT
 905   { Label L;
 906     __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
 907     __ cmp(Rtemp, Rstack_top);
 908     __ b(L, eq);
 909     __ stop("broken stack frame setup in interpreter 3");
 910     __ bind(L);
 911   }
 912 #endif
 913   __ check_extended_sp(Rtemp);
 914 
 915   // jvmti/dtrace support
 916   __ notify_method_entry();
 917 #if R9_IS_SCRATCHED
 918   __ restore_method();
 919 #endif
 920 
 921   {
 922     Label L;
 923     __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
 924     __ cbnz(Rsig_handler, L);
 925     __ mov(R1, Rmethod);
 926     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1, true);
 927     __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
 928     __ bind(L);
 929   }
 930 
 931   {
 932     Label L;
 933     __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
 934     __ cbnz(Rnative_code, L);
 935     __ mov(R1, Rmethod);
 936     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1);
 937     __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
 938     __ bind(L);
 939   }
 940 
 941   // Allocate stack space for arguments
 942 
 943 
 944   // C functions need aligned stack
 945   __ bic(SP, SP, StackAlignmentInBytes - 1);
 946   // Multiply by BytesPerLong instead of BytesPerWord, because calling convention
 947   // may require empty slots due to long alignment, e.g. func(int, jlong, int, jlong)
 948   __ sub(SP, SP, AsmOperand(Rsize_of_params, lsl, LogBytesPerLong));
 949 
 950 #ifdef __ABI_HARD__
 951   // Allocate more stack space to accommodate all GP as well as FP registers:
 952   // 4 * wordSize
 953   // 8 * BytesPerLong
 954   int reg_arguments = align_up((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
 955 #else
 956   // Reserve at least 4 words on the stack for loading
 957   // of parameters passed on registers (R0-R3).
 958   // See generate_slow_signature_handler().
 959   // It is also used for JNIEnv & class additional parameters.
 960   int reg_arguments = 4 * wordSize;
 961 #endif // __ABI_HARD__
 962 
 963   __ sub(SP, SP, reg_arguments);
 964 
 965 
 966   // Note: signature handler blows R4 besides all scratch registers.
 967   // See AbstractInterpreterGenerator::generate_slow_signature_handler().
 968   __ call(Rsig_handler);
 969 #if R9_IS_SCRATCHED
 970   __ restore_method();
 971 #endif
 972   __ mov(Rresult_handler, R0);
 973 
 974   // Pass JNIEnv and mirror for static methods
 975   {
 976     Label L;
 977     __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
 978     __ add(R0, Rthread, in_bytes(JavaThread::jni_environment_offset()));
 979     __ tbz(Rtemp, JVM_ACC_STATIC_BIT, L);
 980     __ load_mirror(Rtemp, Rmethod, Rtemp);
 981     __ add(R1, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
 982     __ str(Rtemp, Address(R1, 0));
 983     __ bind(L);
 984   }
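  // R0 now holds the JNIEnv* for this thread; for static methods R1 points at the
  // frame's oop_temp slot holding the mirror, which serves as the jclass handle.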
 985 
 986   __ set_last_Java_frame(SP, FP, true, Rtemp);
 987 
 988   // Changing state to _thread_in_native must be the last thing to do
 989   // before the jump to native code. At this moment stack must be
 990   // safepoint-safe and completely prepared for stack walking.
 991 #ifdef ASSERT
 992   {
 993     Label L;
 994     __ ldr_u32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
 995     __ cmp_32(Rtemp, _thread_in_Java);
 996     __ b(L, eq);
 997     __ stop("invalid thread state");
 998     __ bind(L);
 999   }
1000 #endif
1001 
1002   // Force all preceding writes to be observed prior to thread state change
1003   __ membar(MacroAssembler::StoreStore, Rtemp);
1004 
1005   __ mov(Rtemp, _thread_in_native);
1006   __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
1007 
1008   __ call(Rnative_code);
1009 #if R9_IS_SCRATCHED
1010   __ restore_method();
1011 #endif
1012 
1013   // Set FPSCR/FPCR to a known state
1014   if (AlwaysRestoreFPU) {
1015     __ restore_default_fp_mode();
1016   }
1017 
1018   // Do safepoint check
1019   __ mov(Rtemp, _thread_in_native_trans);
1020   __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
1021 
1022   // Force this write out before the read below
1023   if (!UseSystemMemoryBarrier) {
1024     __ membar(MacroAssembler::StoreLoad, Rtemp);
1025   }
1026 
1027   // Protect the return value in the interleaved code: save it to callee-save registers.
1028   __ mov(Rsaved_result_lo, R0);
1029   __ mov(Rsaved_result_hi, R1);
1030 #ifdef __ABI_HARD__
1031   // preserve native FP result in a callee-saved register
1032   saved_result_fp = D8;
1033   __ fcpyd(saved_result_fp, D0);
1034 #else
1035   saved_result_fp = fnoreg;
1036 #endif // __ABI_HARD__
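  // The native result is now protected in callee-saved registers (and D8 on the
  // hard-float ABI) so that it survives the runtime calls made during the
  // transition back to Java.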
1037 
1038   {
1039   Label call, skip_call;
1040   __ safepoint_poll(Rtemp, call);
1041   __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
1042   __ cmp(R3, 0);
1043   __ b(skip_call, eq);
1044   __ bind(call);
1045   __ mov(R0, Rthread);
1046   __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none);
1047   __ bind(skip_call);
1048 
1049 #if R9_IS_SCRATCHED
1050   __ restore_method();
1051 #endif
1052   }
1053 
1054   // Perform Native->Java thread transition
1055   __ mov(Rtemp, _thread_in_Java);
1056   __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
1057 
1058   // Zero handles and last_java_sp
1059   __ reset_last_Java_frame(Rtemp);
1060   __ ldr(R3, Address(Rthread, JavaThread::active_handles_offset()));
1061   __ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset()));
1062   if (CheckJNICalls) {
1063     __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
1064   }
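  // Resetting the handle block top releases all JNI local references created by the
  // native code, so the handles can be reused by the next native call.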
1065 
1066   // Unbox oop result, e.g. JNIHandles::resolve result if it's an oop.
1067   {
1068     Label Lnot_oop;
1069     __ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT));
1070     __ cmp(Rtemp, Rresult_handler);
1071     __ b(Lnot_oop, ne);
1072     Register value = Rsaved_result_lo;
1073     __ resolve_jobject(value,   // value
1074                        Rtemp,   // tmp1
1075                        R1_tmp); // tmp2
1076     // Store resolved result in frame for GC visibility.
1077     __ str(value, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
1078     __ bind(Lnot_oop);
1079   }
1080 
1081 
1082   // reguard stack if StackOverflow exception happened while in native.
1083   {
1084     __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
1085     __ cmp_32(Rtemp, StackOverflow::stack_guard_yellow_reserved_disabled);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
#if R9_IS_SCRATCHED
    __ restore_method();
1089 #endif
1090   }
1091 
1092   // check pending exceptions
1093   {
1094     __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
1095     __ cmp(Rtemp, 0);
1096     __ mov(Rexception_pc, PC, ne);
1097     __ b(StubRoutines::forward_exception_entry(), ne);
1098   }
1099 
1100   if (synchronized) {
1101     // address of first monitor
1102     __ sub(R0, FP, - (frame::interpreter_frame_monitor_block_bottom_offset - frame::interpreter_frame_monitor_size()) * wordSize);
1103     __ unlock_object(R0);
1104   }
1105 
1106   // jvmti/dtrace support
1107   // Note: This must happen _after_ handling/throwing any exceptions since
1108   //       the exception handler code notifies the runtime of method exits
1109   //       too. If this happens before, method entry/exit notifications are
1110   //       not properly paired (was bug - gri 11/22/99).
1111   __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp);
1112 
1113   // Restore the result. Oop result is restored from the stack by the
1114   // result handler.
1115   __ mov(R0, Rsaved_result_lo);
1116   __ mov(R1, Rsaved_result_hi);
1117 
1118 #ifdef __ABI_HARD__
1119   // reload native FP result
1120   __ fcpyd(D0, D8);
1121 #endif // __ABI_HARD__
1122 
1123   __ blx(Rresult_handler);
1124 
1125   // Restore FP/LR, sender_sp and return
1126   __ mov(Rtemp, FP);
1127   __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
1128   __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
1129 
1130   __ ret();
1131 
1132   if (inc_counter) {
1133     // Handle overflow of counter and compile method
1134     __ bind(invocation_counter_overflow);
1135     generate_counter_overflow(continue_after_compile);
1136   }
1137 
1138   return entry_point;
1139 }
1140 
1141 //
1142 // Generic interpreted method entry to (asm) interpreter
1143 //
1144 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1145   // determine code generation flags
1146   bool inc_counter  = UseCompiler || CountCompiledCalls;
1147 
1148   // Rmethod: Method*
1149   // Rthread: thread
1150   // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
1151   // Rparams: pointer to the last parameter in the stack
1152 
1153   address entry_point = __ pc();
1154 
1155   const Register RconstMethod = R3;
1156 
1157 
1158   __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));
1159 
1160   __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
1161   __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));
1162 
1163   // setup Rlocals
1164   __ sub(Rlocals, Rparams, wordSize);
1165   __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize));
1166 
1167   __ sub(R3, R3, R2); // number of additional locals
1168 
1169 
1170   // see if we've got enough room on the stack for locals plus overhead.
1171   generate_stack_overflow_check();
1172 
1173   // allocate space for locals
1174   // explicitly initialize locals
1175 
1176   // Loop is unrolled 4 times
1177   Label loop;
1178   __ mov(R0, 0);
1179   __ bind(loop);
1180 
1181   // #1
1182   __ subs(R3, R3, 1);
1183   __ push(R0, ge);
1184 
1185   // #2
1186   __ subs(R3, R3, 1, ge);
1187   __ push(R0, ge);
1188 
1189   // #3
1190   __ subs(R3, R3, 1, ge);
1191   __ push(R0, ge);
1192 
1193   // #4
1194   __ subs(R3, R3, 1, ge);
1195   __ push(R0, ge);
1196 
1197   __ b(loop, gt);
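  // At this point all additional locals (R3 of them) have been zero-initialized on
  // the stack; the unrolled loop clears up to four slots per iteration.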
1198 
1199   // initialize fixed part of activation frame
1200   generate_fixed_frame(false);
1201 
1202   __ restore_dispatch();
1203 
1204   // make sure method is not native & not abstract
1205 #ifdef ASSERT
1206   __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
1207   {
1208     Label L;
1209     __ tbz(Rtemp, JVM_ACC_NATIVE_BIT, L);
1210     __ stop("tried to execute native method as non-native");
1211     __ bind(L);
1212   }
1213   { Label L;
1214     __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
1215     __ stop("tried to execute abstract method in interpreter");
1216     __ bind(L);
1217   }
1218 #endif
1219 
1220   // increment invocation count & check for overflow
1221   Label invocation_counter_overflow;
1222   if (inc_counter) {
1223     if (synchronized) {
1224       // Avoid unlocking method's monitor in case of exception, as it has not
1225       // been locked yet.
1226       __ set_do_not_unlock_if_synchronized(true, Rtemp);
1227     }
1228     generate_counter_incr(&invocation_counter_overflow);
1229   }
1230   Label continue_after_compile;
1231   __ bind(continue_after_compile);
1232 
1233   if (inc_counter && synchronized) {
1234     __ set_do_not_unlock_if_synchronized(false, Rtemp);
1235   }
1236 #if R9_IS_SCRATCHED
1237   __ restore_method();
1238 #endif
1239 
1240   // check for synchronized methods
  // Must happen AFTER the invocation_counter check and the stack overflow check,
  // so the method is not locked if the counter overflows.
1243   //
1244   if (synchronized) {
1245     // Allocate monitor and lock method
1246     lock_method();
1247   } else {
1248     // no synchronization necessary
1249 #ifdef ASSERT
1250       { Label L;
1251         __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
1252         __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
1253         __ stop("method needs synchronization");
1254         __ bind(L);
1255       }
1256 #endif
1257   }
1258 
1259   // start execution
1260 #ifdef ASSERT
1261   { Label L;
1262     __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
1263     __ cmp(Rtemp, Rstack_top);
1264     __ b(L, eq);
1265     __ stop("broken stack frame setup in interpreter 4");
1266     __ bind(L);
1267   }
1268 #endif
1269   __ check_extended_sp(Rtemp);
1270 
1271   // jvmti support
1272   __ notify_method_entry();
1273 #if R9_IS_SCRATCHED
1274   __ restore_method();
1275 #endif
1276 
1277   __ dispatch_next(vtos);
1278 
1279   // invocation counter overflow
1280   if (inc_counter) {
1281     // Handle overflow of counter and compile method
1282     __ bind(invocation_counter_overflow);
1283     generate_counter_overflow(continue_after_compile);
1284   }
1285 
1286   return entry_point;
1287 }
1288 
1289 //------------------------------------------------------------------------------------------------------------------------
1290 // Exceptions
1291 
1292 void TemplateInterpreterGenerator::generate_throw_exception() {
1293   // Entry point in previous activation (i.e., if the caller was interpreted)
1294   Interpreter::_rethrow_exception_entry = __ pc();
1295   // Rexception_obj: exception
1296 
1297   // Clear interpreter_frame_last_sp.
1298   __ mov(Rtemp, 0);
1299   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1300 
1301 #if R9_IS_SCRATCHED
1302   __ restore_method();
1303 #endif
1304   __ restore_bcp();
1305   __ restore_dispatch();
1306   __ restore_locals();
1307 
1308 
1309   // Entry point for exceptions thrown within interpreter code
1310   Interpreter::_throw_exception_entry = __ pc();
1311 
1312   // expression stack is undefined here
1313   // Rexception_obj: exception
1314   // Rbcp: exception bcp
1315   __ verify_oop(Rexception_obj);
1316 
1317   // expression stack must be empty before entering the VM in case of an exception
1318   __ empty_expression_stack();
1319   // find exception handler address and preserve exception oop
1320   __ mov(R1, Rexception_obj);
1321   __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), R1);
1322   // R0: exception handler entry point
1323   // Rexception_obj: preserved exception oop
1324   // Rbcp: bcp for exception handler
1325   __ push_ptr(Rexception_obj);                    // push exception which is now the only value on the stack
1326   __ jump(R0);                                    // jump to exception handler (may be _remove_activation_entry!)
1327 
1328   // If the exception is not handled in the current frame the frame is removed and
1329   // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
1330   //
  // Note: At this point the bci is still that of the instruction which caused
  //       the exception, and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).
1334 
1335   // In current activation
1336   // tos: exception
1337   // Rbcp: exception bcp
1338 
1339   //
1340   // JVMTI PopFrame support
1341   //
1342    Interpreter::_remove_activation_preserving_args_entry = __ pc();
1343 
1344 
1345   __ empty_expression_stack();
1346 
1347   // Set the popframe_processing bit in _popframe_condition indicating that we are
1348   // currently handling popframe, so that call_VMs that may happen later do not trigger new
1349   // popframe handling cycles.
1350 
1351   __ ldr_s32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1352   __ orr(Rtemp, Rtemp, (unsigned)JavaThread::popframe_processing_bit);
1353   __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1354 
1355   {
1356     // Check to see whether we are returning to a deoptimized frame.
1357     // (The PopFrame call ensures that the caller of the popped frame is
1358     // either interpreted or compiled and deoptimizes it if compiled.)
1359     // In this case, we can't call dispatch_next() after the frame is
1360     // popped, but instead must save the incoming arguments and restore
1361     // them after deoptimization has occurred.
1362     //
1363     // Note that we don't compare the return PC against the
1364     // deoptimization blob's unpack entry because of the presence of
1365     // adapter frames in C2.
1366     Label caller_not_deoptimized;
1367     __ ldr(R0, Address(FP, frame::return_addr_offset * wordSize));
1368     __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0);
1369     __ cbnz_32(R0, caller_not_deoptimized);
1370 
1371     // Compute size of arguments for saving when returning to deoptimized caller
1372     __ restore_method();
1373     __ ldr(R0, Address(Rmethod, Method::const_offset()));
1374     __ ldrh(R0, Address(R0, ConstMethod::size_of_parameters_offset()));
1375 
1376     __ logical_shift_left(R1, R0, Interpreter::logStackElementSize);
1377     // Save these arguments
1378     __ restore_locals();
1379     __ sub(R2, Rlocals, R1);
1380     __ add(R2, R2, wordSize);
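    // R1 = size of the outgoing arguments in bytes, R2 = lowest address of the
    // argument area (the arguments occupy the slots from R2 up to Rlocals).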
1381     __ mov(R0, Rthread);
1382     __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R0, R1, R2);
1383 
1384     __ remove_activation(vtos, LR,
1385                          /* throw_monitor_exception */ false,
1386                          /* install_monitor_exception */ false,
1387                          /* notify_jvmdi */ false);
1388 
1389     // Inform deoptimization that it is responsible for restoring these arguments
1390     __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit);
1391     __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1392 
1393     // Continue in deoptimization handler
1394     __ ret();
1395 
1396     __ bind(caller_not_deoptimized);
1397   }
1398 
1399   __ remove_activation(vtos, R4,
1400                        /* throw_monitor_exception */ false,
1401                        /* install_monitor_exception */ false,
1402                        /* notify_jvmdi */ false);
1403 
1404   // Finish with popframe handling
1405   // A previous I2C followed by a deoptimization might have moved the
1406   // outgoing arguments further up the stack. PopFrame expects the
1407   // mutations to those outgoing arguments to be preserved and other
1408   // constraints basically require this frame to look exactly as
1409   // though it had previously invoked an interpreted activation with
1410   // no space between the top of the expression stack (current
1411   // last_sp) and the top of stack. Rather than force deopt to
1412   // maintain this kind of invariant all the time we call a small
1413   // fixup routine to move the mutated arguments onto the top of our
1414   // expression stack if necessary.
1415   __ mov(R1, SP);
1416   __ ldr(R2, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1417   // PC must point into interpreter here
1418   __ set_last_Java_frame(SP, FP, true, Rtemp);
1419   __ mov(R0, Rthread);
1420   __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2);
1421   __ reset_last_Java_frame(Rtemp);
1422 
1423   // Restore the last_sp and null it out
1424   __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1425   __ mov(Rtemp, (int)NULL_WORD);
1426   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1427 
1428   __ restore_bcp();
1429   __ restore_dispatch();
1430   __ restore_locals();
1431   __ restore_method();
1432 
1433   // The method data pointer was incremented already during
1434   // call profiling. We have to restore the mdp for the current bcp.
1435   if (ProfileInterpreter) {
1436     __ set_method_data_pointer_for_bcp();
1437   }
1438 
1439   // Clear the popframe condition flag
1440   assert(JavaThread::popframe_inactive == 0, "adjust this code");
1441   __ str_32(__ zero_register(Rtemp), Address(Rthread, JavaThread::popframe_condition_offset()));
1442 
1443 #if INCLUDE_JVMTI
1444   {
1445     Label L_done;
1446 
1447     __ ldrb(Rtemp, Address(Rbcp, 0));
1448     __ cmp(Rtemp, Bytecodes::_invokestatic);
1449     __ b(L_done, ne);
1450 
1451     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1452     // Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
1453 
1454     // get local0
1455     __ ldr(R1, Address(Rlocals, 0));
1456     __ mov(R2, Rmethod);
1457     __ mov(R3, Rbcp);
1458     __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R1, R2, R3);
1459 
1460     __ cbz(R0, L_done);
1461 
1462     __ str(R0, Address(Rstack_top));
1463     __ bind(L_done);
1464   }
1465 #endif // INCLUDE_JVMTI
1466 
1467   __ dispatch_next(vtos);
1468   // end of PopFrame support
1469 
1470   Interpreter::_remove_activation_entry = __ pc();
1471 
1472   // preserve exception over this code sequence
1473   __ pop_ptr(R0_tos);
1474   __ str(R0_tos, Address(Rthread, JavaThread::vm_result_offset()));
1475   // remove the activation (without doing throws on illegalMonitorExceptions)
1476   __ remove_activation(vtos, Rexception_pc, false, true, false);
1477   // restore exception
1478   __ get_vm_result(Rexception_obj, Rtemp);
1479 
  // In between activations - the previous activation type is not known yet.
  // Compute the continuation point - the continuation point expects the
  // following registers to be set up:
  //
  // Rexception_obj: exception
  // Rexception_pc: return address/pc that threw the exception
  // SP: expression stack of caller
  // FP: frame pointer of caller
1488   __ mov(c_rarg0, Rthread);
1489   __ mov(c_rarg1, Rexception_pc);
1490   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);
1491   // Note that an "issuing PC" is actually the next PC after the call
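  // The call returns the exception handler address for the caller in R0.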
1492 
1493   __ jump(R0);                             // jump to exception handler of caller
1494 }
1495 
1496 
1497 //
1498 // JVMTI ForceEarlyReturn support
1499 //
1500 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1501   address entry = __ pc();
1502 
1504   __ restore_bcp();
1505   __ restore_dispatch();
1506   __ restore_locals();
1507 
1508   __ empty_expression_stack();
1509 
1510   __ load_earlyret_value(state);
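  // load_earlyret_value fetches the forced return value recorded for JVMTI
  // ForceEarlyReturn from the JvmtiThreadState into the tos registers.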
1511 
1512   // Clear the earlyret state
1513   __ ldr(Rtemp, Address(Rthread, JavaThread::jvmti_thread_state_offset()));
1514 
1515   assert(JvmtiThreadState::earlyret_inactive == 0, "adjust this code");
1516   __ str_32(__ zero_register(R2), Address(Rtemp, JvmtiThreadState::earlyret_state_offset()));
1517 
1518   __ remove_activation(state, LR,
1519                        false, /* throw_monitor_exception */
1520                        false, /* install_monitor_exception */
1521                        true); /* notify_jvmdi */
1522 
  // According to the interpreter calling conventions, the result is returned
  // in R0/R1, so ftos (S0) and dtos (D0) are moved to R0/R1.
  // This conversion should be done after remove_activation, as it uses
  // push(state) & pop(state) to preserve the return value.
1527   __ convert_tos_to_retval(state);
1528   __ ret();
1529 
1530   return entry;
1531 } // end of ForceEarlyReturn support
1532 
1533 
1534 //------------------------------------------------------------------------------------------------------------------------
1535 // Helper for vtos entry point generation
1536 
void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep, address& cep, address& sep,
                                                         address& aep, address& iep, address& lep,
                                                         address& fep, address& dep, address& vep) {
1538   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1539   Label L;
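  // Each entry point pushes the tos value for its state onto the expression
  // stack and joins the common vtos entry point at L. States whose tos value
  // has the same machine representation share an entry point by falling
  // through (bep/cep/sep/iep all push a single word); with __SOFTFP__,
  // float/double values are held in core registers, so fep/dep reuse the
  // itos/ltos pushes.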
1540 
1541 #ifdef __SOFTFP__
1542   dep = __ pc();                // fall through
1543 #else
1544   fep = __ pc(); __ push(ftos); __ b(L);
1545   dep = __ pc(); __ push(dtos); __ b(L);
1546 #endif // __SOFTFP__
1547 
1548   lep = __ pc(); __ push(ltos); __ b(L);
1549 
1550   if (VerifyOops) {  // can't share atos entry if VerifyOops
1551     aep = __ pc(); __ push(atos); __ b(L);
1552   } else {
1553     aep = __ pc();              // fall through
1554   }
1555 
1556 #ifdef __SOFTFP__
1557   fep = __ pc();                // fall through
1558 #endif // __SOFTFP__
1559 
1560   bep = cep = sep =             // fall through
1561   iep = __ pc(); __ push(itos); // fall through
1562   vep = __ pc(); __ bind(L);    // fall through
1563   generate_and_dispatch(t);
1564 }
1565 
1566 //------------------------------------------------------------------------------------------------------------------------
1567 
1568 // Non-product code
1569 #ifndef PRODUCT
1570 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1571   address entry = __ pc();
1572 
1573   // prepare expression stack
1574   __ push(state);       // save tosca
1575 
1576   // pass tosca registers as arguments
1577   __ mov(R2, R0_tos);
1578   __ mov(R3, R1_tos_hi);
1579   __ mov(R1, LR);       // save return address
1580 
1581   // call tracer
1582   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);
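  // The VM call is expected to return the preserved return address (passed
  // in R1) back in R0, so it can be restored into LR below.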
1583 
1584   __ mov(LR, R0);       // restore return address
1585   __ pop(state);        // restore tosca
1586 
1587   // return
1588   __ ret();
1589 
1590   return entry;
1591 }
1592 
1593 
1594 void TemplateInterpreterGenerator::count_bytecode() {
1595   __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
1596 }
1597 
1598 
1599 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1600   __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
1601 }
1602 
1603 
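// Count executed (previous bytecode, current bytecode) pairs; the combined
// pair index is kept in BytecodePairHistogram::_index between bytecodes.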
1604 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1605   const Register Rindex_addr = R2_tmp;
1606   Label Lcontinue;
1607   InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);
1608   InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
1609   const Register Rcounters_addr = R2_tmp;
1610   const Register Rindex = R4_tmp;
1611 
1612   // calculate new index for counter:
1613   // index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes).
1614   // (_index >> log2_number_of_codes) is previous bytecode
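  // i.e. the new index has the previous bytecode in its low bits and the
  // current bytecode in its high bits.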
1615 
1616   __ ldr_literal(Rindex_addr, Lindex);
1617   __ ldr_s32(Rindex, Address(Rindex_addr));
1618   __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1619   __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
1620   __ str_32(Rindex, Address(Rindex_addr));
1621 
1622   // Rindex (R4) contains index of counter
1623 
1624   __ ldr_literal(Rcounters_addr, Lcounters);
1625   __ ldr_s32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
1626   __ adds_32(Rtemp, Rtemp, 1);
1627   __ b(Lcontinue, mi);                           // avoid overflow
1628   __ str_32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
1629 
1630   __ b(Lcontinue);
1631 
1632   __ bind_literal(Lindex);
1633   __ bind_literal(Lcounters);
1634 
1635   __ bind(Lcontinue);
1636 }
1637 
1638 
1639 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1640   // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
1642   // the tosca in-state for the given template.
1643   assert(Interpreter::trace_code(t->tos_in()) != nullptr,
1644          "entry must have been generated");
1645   address trace_entry = Interpreter::trace_code(t->tos_in());
1646   __ call(trace_entry, relocInfo::none);
1647 }
1648 
1649 
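// Breakpoint into the debugger when the global bytecode counter reaches the
// value of the StopInterpreterAt flag.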
1650 void TemplateInterpreterGenerator::stop_interpreter_at() {
1651   Label Lcontinue;
1652   const Register stop_at = R2_tmp;
1653 
1654   __ ldr_global_s32(Rtemp, (address) &BytecodeCounter::_counter_value);
1655   __ mov_slow(stop_at, StopInterpreterAt);
1656 
1657   // test bytecode counter
1658   __ cmp(Rtemp, stop_at);
1659   __ b(Lcontinue, ne);
1660 
1661   __ trace_state("stop_interpreter_at");
1662   __ breakpoint();
1663 
1664   __ bind(Lcontinue);
1665 }
1666 #endif // !PRODUCT