/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/compiler_globals.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interp_masm_aarch64.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  cmpw(rscratch1, T_INT);
  br(Assembler::EQ, done);

  // mask integer result to narrower return type.
  cmpw(rscratch1, T_BOOLEAN);
  br(Assembler::NE, notBool);
  andw(result, result, 0x1);
  b(done);

  bind(notBool);
  cmpw(rscratch1, T_BYTE);
  br(Assembler::NE, notByte);
  sbfx(result, result, 0, 8);
  b(done);

  bind(notByte);
  cmpw(rscratch1, T_CHAR);
  br(Assembler::NE, notChar);
  ubfx(result, result, 0, 16);  // truncate upper 16 bits
  b(done);

  bind(notChar);
  sbfx(result, result, 0, 16);     // sign-extend short

  // Nothing to do for T_INT
  bind(done);
}
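
// Illustrative sketch only (not generated code): the narrowing above is
// equivalent to the following C, switching on the declared return type:
//
//   switch (result_type) {
//     case T_BOOLEAN: result &= 1;                  break;  // andw
//     case T_BYTE:    result = (int8_t)   result;   break;  // sbfx 0,8
//     case T_CHAR:    result = (uint16_t) result;   break;  // ubfx 0,16
//     case T_SHORT:   result = (int16_t)  result;   break;  // sbfx 0,16
//     default:        /* T_INT: nothing to do */    break;
//   }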

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  b(entry);
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed.  If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // This method is only called just after the call into the vm in
    // call_VM_base, so the arg registers are available.
    ldrw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
    tbz(rscratch1, exact_log2(JavaThread::popframe_pending_bit), L);
    tbnz(rscratch1, exact_log2(JavaThread::popframe_processing_bit), L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    br(r0);
    bind(L);
  }
}
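
// Sketch of the test above, in pseudo-C (names follow the popframe condition
// bits in JavaThread):
//
//   int cond = rthread->popframe_condition();
//   if ((cond & popframe_pending_bit) != 0 &&
//       (cond & popframe_processing_bit) == 0) {
//     goto *Interpreter::remove_activation_preserving_args_entry();
//   }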


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  ldr(r2, Address(rthread, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr(r2, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(r2, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(r2, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
    case atos: ldr(r0, oop_addr);
               str(zr, oop_addr);
               interp_verify_oop(r0, state);        break;
    case ltos: ldr(r0, val_addr);                   break;
    case btos:                                   // fall through
    case ztos:                                   // fall through
    case ctos:                                   // fall through
    case stos:                                   // fall through
    case itos: ldrw(r0, val_addr);                  break;
    case ftos: ldrs(v0, val_addr);                  break;
    case dtos: ldrd(v0, val_addr);                  break;
    case vtos: /* nothing to do */                  break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  movw(rscratch1, (int) ilgl);
  strw(rscratch1, tos_addr);
  strw(zr, val_addr);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    cbz(rscratch1, L); // if (thread->jvmti_thread_state() == nullptr) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_state_offset()));
    cmpw(rscratch1, JvmtiThreadState::earlyret_pending);
    br(Assembler::NE, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), rscratch1);
    br(r0);
    bind(L);
  }
}

void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
  Register reg,
  int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  ldrh(reg, Address(rbcp, bcp_offset));
  rev16(reg, reg);
}

void InterpreterMacroAssembler::get_dispatch() {
  uint64_t offset;
  adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
  // Use add() here after ADRP, rather than lea().
  // lea() does not generate anything if its offset is zero.
  // However, relocs expect to find either an ADD or a load/store
  // insn after an ADRP.  add() always generates an ADD insn, even
  // for add(Rn, Rn, 0).
  add(rdispatch, rdispatch, offset);
}
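
// Schematically (exact relocation syntax varies by assembler), the pair
// emitted above is the usual page-relative materialization:
//
//   adrp rdispatch, <dispatch_table page>
//   add  rdispatch, rdispatch, <dispatch_table low 12 bits>
//
// which is why an ADD must follow even when the low 12 bits are zero.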

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(rbcp, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    // assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    ldrw(index, Address(rbcp, bcp_offset));
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(rbcp, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
  resolve_oop_handle(result, tmp, rscratch2);
  // Add in the index
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
}
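
// Net effect, in pseudo-C (the base offset of the objArray elements is
// folded into 'index' above before the scaled load):
//
//   oop refs = cpool->cache()->resolved_references();  // via OopHandle
//   result   = refs[index];                            // load_heap_oop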

void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                             Register cpool, Register index, Register klass, Register temp) {
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
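
// Net effect, in pseudo-C: the u2 resolved_klass_index is read out of the
// constant pool slot (the slots follow the ConstantPool object itself), then
// used to index the side table of Klass*:
//
//   u2 rki = *(u2*)((address)cpool + index * wordSize + sizeof(ConstantPool));
//   klass  = cpool->_resolved_klasses->at(rki);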

// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
}

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  str(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_i(Register r) {
  str(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_l(Register r) {
  str(zr, pre(esp, -wordSize));
  str(r, pre(esp, -wordSize));
}
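
// Expression-stack slot layout after push_l (the stack grows down, so the
// value ends up below the zeroed filler slot):
//
//   esp + wordSize : 0       (unused high slot, zeroed rather than left stale)
//   esp + 0        : value   (the long itself)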

void InterpreterMacroAssembler::pop_f(FloatRegister r) {
  ldrs(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_d(FloatRegister r) {
  ldrd(r, post(esp, 2 * Interpreter::stackElementSize));
}

void InterpreterMacroAssembler::push_f(FloatRegister r) {
  strs(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_d(FloatRegister r) {
  strd(r, pre(esp, -2 * wordSize));
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();                 break;
  case btos:
  case ztos:
  case ctos:
  case stos:
  case itos: pop_i();                   break;
  case ltos: pop_l();                   break;
  case ftos: pop_f();                   break;
  case dtos: pop_d();                   break;
  case vtos: /* nothing to do */        break;
  default:   ShouldNotReachHere();
  }
  interp_verify_oop(r0, state);
}

void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(r0, state);
  switch (state) {
  case atos: push_ptr();                break;
  case btos:
  case ztos:
  case ctos:
  case stos:
  case itos: push_i();                  break;
  case ltos: push_l();                  break;
  case ftos: push_f();                  break;
  case dtos: push_d();                  break;
  case vtos: /* nothing to do */        break;
  default  : ShouldNotReachHere();
  }
}

// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ldr(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  str(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::load_float(Address src) {
  ldrs(v0, src);
}

void InterpreterMacroAssembler::load_double(Address src) {
  ldrd(v0, src);
}

void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  mov(r19_sender_sp, sp);
  // record last_sp
  sub(rscratch1, esp, rfp);
  asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
  str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
}
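
// last_sp is stored relative to rfp, in stack elements rather than as a raw
// pointer; illustratively:
//
//   last_sp_rel = (esp - rfp) >> Interpreter::logStackElementSize;  // negative
//
// and it is rebuilt later as rfp + (last_sp_rel << logStackElementSize), the
// same derelativize pattern used for monitor_block_top in remove_activation.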

// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbzw(rscratch1, run_compiled_code);
    ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));
    br(rscratch1);
    bind(run_compiled_code);
  }

  ldr(rscratch1, Address(method, Method::from_interpreted_offset()));
  br(rscratch1);
}

// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.  amd64 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop,
                                              bool generate_poll) {
  if (VerifyActivationFrameSize) {
    Unimplemented();
  }
  if (verifyoop) {
    interp_verify_oop(r0, state);
  }

  Label safepoint;
  address* const safepoint_table = Interpreter::safept_table(state);
  bool needs_thread_local_poll = generate_poll && table != safepoint_table;

  if (needs_thread_local_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    ldr(rscratch2, Address(rthread, JavaThread::polling_word_offset()));
    tbnz(rscratch2, exact_log2(SafepointMechanism::poll_bit()), safepoint);
  }

  if (table == Interpreter::dispatch_table(state)) {
    addw(rscratch2, rscratch1, Interpreter::distance_from_dispatch_table(state));
    ldr(rscratch2, Address(rdispatch, rscratch2, Address::uxtw(3)));
  } else {
    mov(rscratch2, (address)table);
    ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
  }
  br(rscratch2);

  if (needs_thread_local_poll) {
    bind(safepoint);
    lea(rscratch2, ExternalAddress((address)safepoint_table));
    ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
    br(rscratch2);
  }
}
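
// Net effect of dispatch_base, in pseudo-C (rscratch1 holds the bytecode):
//
//   if (poll_needed && (rthread->polling_word() & poll_bit))
//     goto *safepoint_table[bytecode];
//   goto *table[bytecode];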

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode
  ldrb(rscratch1, Address(pre(rbcp, step)));
  dispatch_base(state, Interpreter::dispatch_table(state), /*verifyoop*/true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  ldrb(rscratch1, Address(rbcp, 0));
  dispatch_base(state, table);
}

// remove activation
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: Registers r0 and v0 may be in use for the
  // method result if this is a synchronized method.
  Label unlocked, unlock, no_unlock;

  // The poll below is for the stack watermark barrier. It allows frames to be
  // fixed up lazily: a return into a part of the stack that has not yet been
  // processed by the barrier will call InterpreterRuntime::at_unwind.
  Label slow_path;
  Label fast_path;
  safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
  br(Assembler::AL, fast_path);
  bind(slow_path);
  push(state);
  set_last_Java_frame(esp, rfp, (address)pc(), rscratch1);
  super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
  reset_last_Java_frame(true);
  pop(state);
  bind(fast_path);

  // get the value of _do_not_unlock_if_synchronized into r3
  const Address do_not_unlock_if_synchronized(rthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  ldrb(r3, do_not_unlock_if_synchronized);
  strb(zr, do_not_unlock_if_synchronized); // reset the flag

  // get method access flags
  ldr(r1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  ldr(r2, Address(r1, Method::access_flags_offset()));
  tbz(r2, exact_log2(JVM_ACC_SYNCHRONIZED), unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cbnz(r3, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rfp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1 so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(c_rarg1, monitor); // address of first monitor

  ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset()));
  cbnz(r0, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception.  Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    b(unlocked);
  }

  bind(unlock);
  unlock_object(c_rarg1);
  pop(state);

  // Check for block-structured locking, i.e. that all locked
  // objects have been unlocked.
  bind(unlocked);

  // r0: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
    const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    ldr(c_rarg1, monitor_block_top); // derelativize pointer
    lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to current entry, starting with top-most entry

    lea(r19, monitor_block_bot);  // points to word before bottom of
                                  // monitor block
    b(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                   throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.

      push(state);
      unlock_object(c_rarg1);
      pop(state);

      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }

      b(restart);
    }

    bind(loop);
    // check if current entry is used
    ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset()));
    cbnz(rscratch1, exception);

    add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
    bind(entry);
    cmp(c_rarg1, r19); // check if bottom reached
    br(Assembler::NE, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender esp
  ldr(rscratch2,
      Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
  if (StackReservedPages > 0) {
    // testing if reserved zone needs to be re-enabled
    Label no_reserved_zone_enabling;

    // check if already enabled - if so no re-enabling needed
    assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
    ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
    cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
    br(Assembler::EQ, no_reserved_zone_enabling);

    // look for an overflow into the stack reserved zone, i.e.
    // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
    ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    cmp(rscratch2, rscratch1);
    br(Assembler::LS, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }

  // restore sender esp
  mov(esp, rscratch2);
  // remove frame anchor
  leave();
  // If we're returning to interpreted code we will shortly be
  // adjusting SP to allow some space for ESP.  If we're returning to
  // compiled code the saved sender SP was saved in sender_sp, so this
  // restores it.
  andr(sp, esp, -16);
}

// Lock object
//
// Args:
//      c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::lock_object(Register lock_reg)
{
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
  if (LockingMode == LM_MONITOR) {
    call_VM_preemptable(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label count, done;

    const Register swap_reg = r0;
    const Register tmp = c_rarg2;
    const Register obj_reg = c_rarg3; // Will contain the oop
    const Register tmp2 = c_rarg4;
    const Register tmp3 = c_rarg5;

    const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
    const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg %c_rarg3
    ldr(obj_reg, Address(lock_reg, obj_offset));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp, obj_reg);
      ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
      tst(tmp, KlassFlags::_misc_is_value_based_class);
      br(Assembler::NE, slow_case);
    }

    if (LockingMode == LM_LIGHTWEIGHT) {
      lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
      b(done);
    } else if (LockingMode == LM_LEGACY) {
      // Load (object->mark() | 1) into swap_reg
      ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      orr(swap_reg, rscratch1, 1);

      // Save (object->mark() | 1) into BasicLock's displaced header
      str(swap_reg, Address(lock_reg, mark_offset));

      assert(lock_offset == 0,
             "displaced header must be first word in BasicObjectLock");

      Label fail;
      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);

      // Fast check for recursive lock.
      //
      // Can apply the optimization only if this is a stack lock
      // allocated in this thread. For efficiency, we can focus on
      // recently allocated stack locks (instead of reading the stack
      // base and checking whether 'mark' points inside the current
      // thread stack):
      //  1) (mark & 7) == 0, and
      //  2) sp <= mark < sp + os::pagesize()
      //
      // Warning: sp + os::pagesize can overflow the stack base. We must
      // neither apply the optimization for an inflated lock allocated
      // just above the thread stack (this is why condition 1 matters)
      // nor apply the optimization if the stack lock is inside the stack
      // of another thread. The latter is avoided even in case of overflow
      // because we have guard pages at the end of all stacks. Hence, if
      // we go over the stack base and hit the stack of another thread,
      // this should not be in a writeable area that could contain a
      // stack lock allocated by that thread. As a consequence, a stack
      // lock less than page size away from sp is guaranteed to be
      // owned by the current thread.
      //
      // These 3 tests can be done by evaluating the following
      // expression: ((mark - sp) & (7 - os::vm_page_size())),
      // assuming both stack pointer and pagesize have their
      // least significant 3 bits clear.
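      //
      // Worked example (illustrative, assuming a 4K page): 7 - 4096 is
      // ...fffff007 in two's complement, so the AND clears bits 3..11
      // only, and the result is zero exactly when (mark - sp) is 8-byte
      // aligned and 0 <= mark - sp < 4096, i.e. the stack lock sits on
      // this thread's stack no more than a page above sp.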
      // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
      // NOTE2: aarch64 does not like to subtract sp from rn so take a
      // copy
      mov(rscratch1, sp);
      sub(swap_reg, swap_reg, rscratch1);
      ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));

      // Save the test result, for recursive case, the result is zero
      str(swap_reg, Address(lock_reg, mark_offset));
      br(Assembler::NE, slow_case);

      bind(count);
      inc_held_monitor_count(rscratch1);
      b(done);
    }
    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM_preemptable(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);

    bind(done);
  }
}


// Unlocks an object. Used in monitorexit bytecode and
// remove_activation.  Throws an IllegalMonitorException if object is
// not locked by current thread.
//
// Args:
//      c_rarg1: BasicObjectLock for lock
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::unlock_object(Register lock_reg)
{
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");

  if (LockingMode == LM_MONITOR) {
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Label count, done;

    const Register swap_reg   = r0;
    const Register header_reg = c_rarg2;  // Will contain the old oopMark
    const Register obj_reg    = c_rarg3;  // Will contain the oop
    const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock

    save_bcp(); // Save in case of exception

    if (LockingMode != LM_LIGHTWEIGHT) {
      // Convert from BasicObjectLock structure to object and BasicLock
      // structure. Store the BasicLock address into %r0.
      lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
    }

    // Load oop into obj_reg(%c_rarg3)
    ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));

    // Free entry
    str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));

    Label slow_case;
    if (LockingMode == LM_LIGHTWEIGHT) {
      lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
      b(done);
    } else if (LockingMode == LM_LEGACY) {
      // Load the old header from BasicLock structure
      ldr(header_reg, Address(swap_reg,
                              BasicLock::displaced_header_offset_in_bytes()));

      // Test for recursion
      cbz(header_reg, count);

      // Atomic swap back the old header
      cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, &slow_case);

      bind(count);
      dec_held_monitor_count(rscratch1);
      b(done);
    }

    bind(slow_case);
    // Call the runtime routine for slow case.
    str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
    bind(done);
    restore_bcp();
  }
}

void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
  cbz(mdp, zero_continue);
}

// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;
  stp(r0, r1, Address(pre(sp, -2 * wordSize)));

  // Test MDO to avoid the call if it is null.
  ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));
  cbz(r0, set_mdp);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rmethod, rbcp);
  // r0: mdi
  // mdo is guaranteed to be non-zero here, we checked for it before the call.
  ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
  lea(r1, Address(r1, in_bytes(MethodData::data_offset())));
  add(r0, r1, r0);
  str(r0, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
  bind(set_mdp);
  ldp(r0, r1, Address(post(sp, 2 * wordSize)));
}
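
// Net effect, in pseudo-C:
//
//   MethodData* mdo = rmethod->method_data();
//   if (mdo != nullptr)
//     frame.mdp = (address)mdo + MethodData::data_offset()
//                 + bcp_to_di(rmethod, rbcp);   // mdi returned in r0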

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  stp(r0, r1, Address(pre(sp, -2 * wordSize)));
  stp(r2, r3, Address(pre(sp, -2 * wordSize)));
  test_method_data_pointer(r3, verify_continue); // If mdp is zero, continue
  get_method(r1);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  ldrsh(r2, Address(r3, in_bytes(DataLayout::bci_offset())));
  ldr(rscratch1, Address(r1, Method::const_offset()));
  add(r2, r2, rscratch1, Assembler::LSL);
  lea(r2, Address(r2, ConstMethod::codes_offset()));
  cmp(r2, rbcp);
  br(Assembler::EQ, verify_continue);
  // r1: method
  // rbcp: bcp (rbcp == x22)
  // r3: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               r1, rbcp, r3);
  bind(verify_continue);
  ldp(r2, r3, Address(post(sp, 2 * wordSize)));
  ldp(r0, r1, Address(post(sp, 2 * wordSize)));
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  str(value, data);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  increment_mdp_data_at(mdp_in, noreg, constant, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64-bit counters; at best it is wasting space,
  // at worst it is a rare bug when counters overflow

  assert_different_registers(rscratch2, rscratch1, mdp_in, reg);

  Address addr1(mdp_in, constant);
  Address addr2(rscratch2, reg, Address::lsl(0));
  Address &addr = addr1;
  if (reg != noreg) {
    lea(rscratch2, addr1);
    addr = addr2;
  }

  if (decrement) {
    // Decrement the register.  Set condition codes.
    // Intel does this
    // addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    // Label L;
    // jcc(Assembler::negative, L);
    // addptr(data, (int32_t) DataLayout::counter_increment);
    // so we do this
    ldr(rscratch1, addr);
    subs(rscratch1, rscratch1, (unsigned)DataLayout::counter_increment);
    Label L;
    br(Assembler::LO, L);       // skip store if counter underflow
    str(rscratch1, addr);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Intel does this
    // Increment the register.  Set carry flag.
    // addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    // sbbptr(data, (int32_t)0);
    // so we do this
    ldr(rscratch1, addr);
    adds(rscratch1, rscratch1, DataLayout::counter_increment);
    Label L;
    br(Assembler::CS, L);       // skip store if counter overflow
    str(rscratch1, addr);
    bind(L);
  }
}
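
// Both paths above implement a saturating (non-wrapping) counter update; in
// pseudo-C, with x the current 64-bit counter value:
//
//   increment: if (x != UINT64_MAX) *addr = x + 1;  // CS: skip store on carry
//   decrement: if (x != 0)          *addr = x - 1;  // LO: skip store on borrow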

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int flags_offset = in_bytes(DataLayout::flags_offset());
  // Set the flag
  ldrb(rscratch1, Address(mdp_in, flags_offset));
  orr(rscratch1, rscratch1, flag_byte_constant);
  strb(rscratch1, Address(mdp_in, flags_offset));
}


void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    ldr(rscratch1, Address(mdp_in, offset));
    cmp(value, rscratch1);
  } else {
    // Put the test value into a register, so caller can use it:
    ldr(test_value_out, Address(mdp_in, offset));
    cmp(value, test_value_out);
  }
  br(Assembler::NE, not_equal_continue);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ldr(rscratch1, Address(mdp_in, offset_of_disp));
  add(mdp_in, mdp_in, rscratch1, LSL);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  lea(rscratch1, Address(mdp_in, offset_of_disp));
  ldr(rscratch1, Address(rscratch1, reg, Address::lsl(0)));
  add(mdp_in, mdp_in, rscratch1, LSL);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(mdp_in, mdp_in, (unsigned)constant);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // save/restore across call_VM
  stp(zr, return_bci, Address(pre(sp, -2 * wordSize)));
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  ldp(zr, return_bci, Address(post(sp, 2 * wordSize)));
}


void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    ldr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
            "flow-free idiom only works with 1");
    // Intel does this to catch overflow
    // addptr(bumped_count, DataLayout::counter_increment);
    // sbbptr(bumped_count, 0);
    // so we do this
    adds(bumped_count, bumped_count, DataLayout::counter_increment);
    Label L;
    br(Assembler::CS, L);       // skip store if counter overflow
    str(bumped_count, data);
    bind(L);
    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      cbnz(receiver, not_null);
      // We are making a call.  Increment the count for null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      b(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows.  At the same time, it remembers
// the location of the first empty row.  (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree.  Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done) {
  if (TypeProfileWidth == 0) {
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
  } else {
    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
        &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
                                        Register reg2, int start_row, Label& done, int total_rows,
                                        OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(mdp, item_offset, item,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the item from the CallData.)

    // The item is item[n].  Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(mdp, count_offset);
    b(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on item[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        cbz(reg2, found_null);
        // Item did not match any saved item and there is no empty row for it.
        // Increment total counter to indicate polymorphic case.
        increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
        b(done);
        bind(found_null);
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      cbz(reg2, found_null);

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
        item_offset_fn, item_count_offset_fn);

      // Found a null.  Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed the item[start_row] is null.

  // Fill in the item field and increment the count.
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(mdp, item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  mov(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    b(done);
  }
}

// Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != nullptr) {
//     // inner copy of decision tree, rooted at row[1]
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != nullptr) {
//       // degenerate decision tree, rooted at row[2]
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
//   done:

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);

  bind(done);
}

void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      b(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2);
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}
1342 
1343 void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
1344   if (ProfileInterpreter) {
1345     Label profile_continue;
1346 
1347     // If no method data exists, go to profile_continue.
1348     test_method_data_pointer(mdp, profile_continue);
1349 
1350     // Update the default case count
1351     increment_mdp_data_at(mdp,
1352                           in_bytes(MultiBranchData::default_count_offset()));
1353 
1354     // The method data pointer needs to be updated.
1355     update_mdp_by_offset(mdp,
1356                          in_bytes(MultiBranchData::
1357                                   default_displacement_offset()));
1358 
1359     bind(profile_continue);
1360   }
1361 }
1362 
1363 void InterpreterMacroAssembler::profile_switch_case(Register index,
1364                                                     Register mdp,
1365                                                     Register reg2) {
1366   if (ProfileInterpreter) {
1367     Label profile_continue;
1368 
1369     // If no method data exists, go to profile_continue.
1370     test_method_data_pointer(mdp, profile_continue);
1371 
1372     // Build the base (index * per_case_size_in_bytes()) +
1373     // case_array_offset_in_bytes()
1374     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1375     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1376     Assembler::maddw(index, index, reg2, rscratch1);
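    // index now holds the byte offset from mdp to this case's record; the
    // relative_* offsets below are applied on top of it.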

    // Update the case count.
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
  if (state == atos) {
    MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
  }
}

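// There is no interpreter FPU state to verify on AArch64, so this is a no-op.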
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }


void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit
  // events are sent to track stack depth. If it is possible to enter
  // interp_only_mode we add the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbzw(r3, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  if (DTraceMethodProbes) {
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 rthread, c_rarg1);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    get_method(c_rarg1);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, c_rarg1);
  }
}


void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit
  // events are sent to track stack depth. If it is possible to enter
  // interp_only_mode we add the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // The template interpreter will leave the result on the top of the stack.
    push(state);
    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbz(r3, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    pop(state);
  }

  if (DTraceMethodProbes) {
    push(state);
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 rthread, c_rarg1);
    pop(state);
  }
}


// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
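// Roughly, in C (illustrative sketch only):
//   int c = preloaded ? scratch : *counter_addr;
//   c += increment;
//   *counter_addr = c;
//   if ((c & *mask) <cond> 0) goto *where;   // ands sets flags, br tests cond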
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask,
                                                        Register scratch, Register scratch2,
                                                        bool preloaded, Condition cond,
                                                        Label* where) {
  if (!preloaded) {
    ldrw(scratch, counter_addr);
  }
  add(scratch, scratch, increment);
  strw(scratch, counter_addr);
  ldrw(scratch2, mask);
  ands(scratch, scratch, scratch2);
  br(cond, *where);
}

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore rbcp and rlocals since these are
  //       callee-saved registers and no blocking/GC can happen in leaf
  //       calls.
#ifdef ASSERT
  {
    Label L;
    ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
    cbz(rscratch1, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != nullptr");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address  entry_point,
                                             int      number_of_arguments,
                                             bool     check_exceptions) {
  // interpreter specific
  //
  // Note: Could avoid restoring the locals pointer (callee-saved), but it
  //       doesn't really make a difference for these runtime calls, since
  //       they are slow anyway. Note that bcp must be saved/restored since
  //       it may change due to GC.
  // assert(java_thread == noreg , "not expecting a precomputed java thread");
  save_bcp();
#ifdef ASSERT
  {
    Label L;
    ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
    cbz(rscratch1, L);
    stop("InterpreterMacroAssembler::call_VM_base:"
         " last_sp != nullptr");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
                               entry_point, number_of_arguments,
                               check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}

void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
                                                    address entry_point,
                                                    Register arg_1) {
  assert(arg_1 == c_rarg1, "");
  Label resume_pc, not_preempted;

#ifdef ASSERT
  {
    Label L;
    ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
    cbz(rscratch1, L);
    stop("Should not have alternate return address set");
    bind(L);
  }
#endif /* ASSERT */

  // Force freeze slow path.
  push_cont_fastpath();

  // Make VM call. In case of preemption set last_pc to the one we want to resume to.
  adr(rscratch1, resume_pc);
  str(rscratch1, Address(rthread, JavaThread::last_Java_pc_offset()));
  call_VM_base(oop_result, noreg, noreg, entry_point, 1, false /*check_exceptions*/);

  pop_cont_fastpath();

  // Check if preempted.
  ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
  cbz(rscratch1, not_preempted);
  str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
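  // We were preempted: the flag is cleared above, now resume at the
  // alternate return address (loaded into rscratch1) supplied by the VM.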
  br(rscratch1);

  // In case of preemption, this is where we will resume once we finally acquire the monitor.
  bind(resume_pc);
  restore_after_resume(false /* is_native */);

  bind(not_preempted);
}

void InterpreterMacroAssembler::restore_after_resume(bool is_native) {
  lea(rscratch1, ExternalAddress(Interpreter::cont_resume_interpreter_adapter()));
  blr(rscratch1);
  if (is_native) {
    // On resume we need to set up the stack as expected.
    push(dtos);
    push(ltos);
  }
}

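// Record the klass of obj in the single TypeEntries cell at mdo_addr. The
// cell packs a klass pointer with low flag bits (null_seen, type_unknown);
// the XOR below compares the incoming klass against the recorded one while
// ignoring those flag bits.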
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
  Label update, next, none;

  verify_oop(obj);

  cbnz(obj, update);
  orptr(mdo_addr, TypeEntries::null_seen);
  b(next);

  bind(update);
  load_klass(obj, obj);

  ldr(rscratch1, mdo_addr);
  eor(obj, obj, rscratch1);
  tst(obj, TypeEntries::type_klass_mask);
  br(Assembler::EQ, next); // klass seen before, nothing to
                           // do. The unknown bit may have been
                           // set already but no need to check.

  tbnz(obj, exact_log2(TypeEntries::type_unknown), next);
  // already unknown. Nothing to do anymore.

  cbz(rscratch1, none);
  cmp(rscratch1, (u1)TypeEntries::null_seen);
  br(Assembler::EQ, none);
  // There is a chance that the checks above fail if another thread has
  // just set the profiling to this obj's klass, so recompare against a
  // freshly loaded value.
  eor(obj, obj, rscratch1); // get back original value before XOR
  ldr(rscratch1, mdo_addr);
  eor(obj, obj, rscratch1);
  tst(obj, TypeEntries::type_klass_mask);
  br(Assembler::EQ, next);

  // Different from before. Cannot keep an accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown);
  b(next);

  bind(none);
  // first time here. Set profile type.
  str(obj, mdo_addr);
#ifdef ASSERT
  andr(obj, obj, TypeEntries::type_mask);
  verify_klass_ptr(obj);
#endif

  bind(next);
}

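// Record the types of the arguments at a call site in the
// CallTypeData/VirtualCallTypeData at mdp, and leave mdp pointing past the
// entry so the return type, if profiled, can be recorded too. The layout is,
// roughly: a cell count, then one {stack_slot, type} pair per profiled
// argument, optionally followed by a return-type entry.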
void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    ldrb(rscratch1, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
    cmp(rscratch1, u1(is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag));
    br(Assembler::NE, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If the return value type is profiled we may have no argument to profile.
          ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
          sub(tmp, tmp, i*TypeStackSlotEntries::per_arg_count());
          cmp(tmp, (u1)TypeStackSlotEntries::per_arg_count());
          add(rscratch1, mdp, off_to_args);
          br(Assembler::LT, done);
        }
        ldr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list.
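        // (e.g. with n == 3 arguments, slot o == 0 maps to offset 2 from the end)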
        ldr(rscratch1, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))));
        sub(tmp, tmp, rscratch1);
        sub(tmp, tmp, 1);
        Address arg_addr = argument_address(tmp);
        ldr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i)));
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
        sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      add(rscratch1, mdp, off_to_args);
      bind(done);
      mov(mdp, rscratch1);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
      }
      str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one.

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, rbcp);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      Label do_profile;
      ldrb(rscratch1, Address(rbcp, 0));
      cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
      br(Assembler::EQ, do_profile);
      cmp(rscratch1, (u1)Bytecodes::_invokehandle);
      br(Assembler::EQ, do_profile);
      get_method(tmp);
      ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
      subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
      br(Assembler::NE, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters.
    ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameter. Collect profiling from the last parameter down.
    // mdo start + parameters offset + array length - 1
    add(mdp, mdp, tmp1);
    ldr(tmp1, Address(mdp, ArrayData::array_len_offset()));
    sub(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    int per_arg_scale = exact_log2(DataLayout::cell_size);
    add(rscratch1, mdp, off_base);
    add(rscratch2, mdp, type_base);

    Address arg_off(rscratch1, tmp1, Address::lsl(per_arg_scale));
    Address arg_type(rscratch2, tmp1, Address::lsl(per_arg_scale));
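    // tmp1 indexes in units of cells; both bases are scaled by
    // DataLayout::cell_size, so arg_off and arg_type address the slot and
    // type cells of the same parameter entry.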

    // load offset on the stack from the slot for this parameter
    ldr(tmp2, arg_off);
    neg(tmp2, tmp2);
    // read the parameter from the local area
    ldr(tmp2, Address(rlocals, tmp2, Address::lsl(Interpreter::logStackElementSize)));

    // profile the parameter
    profile_obj_type(tmp2, arg_type);

    // go to next parameter
    subs(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());
    br(Assembler::GE, loop);

    bind(profile_continue);
  }
}

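// Compute the address of the ResolvedIndyEntry for the invokedynamic at the
// current bcp:
//   cache = entries array base + Array<ResolvedIndyEntry> header
//           + index * sizeof(ResolvedIndyEntry)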
void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
  // Get the index out of the bytecode pointer (cf. get_cache_entry_pointer_at_bcp)
  get_cache_index_at_bcp(index, 1, sizeof(u4));
  // Get the address of the invokedynamic entries array
  ldr(cache, Address(rcpool, in_bytes(ConstantPoolCache::invokedynamic_entries_offset())));
  // Scale the index to be the entry index * sizeof(ResolvedIndyEntry)
  lsl(index, index, log2i_exact(sizeof(ResolvedIndyEntry)));
  add(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
  lea(cache, Address(cache, index));
}

void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
  // Get the index out of the bytecode pointer
  get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
  // Take a shortcut if the size is a power of 2
  if (is_power_of_2(sizeof(ResolvedFieldEntry))) {
    lsl(index, index, log2i_exact(sizeof(ResolvedFieldEntry))); // Scale index by power of 2
  } else {
    mov(cache, sizeof(ResolvedFieldEntry));
    mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedFieldEntry)
  }
  // Get the address of the field entries array
  ldr(cache, Address(rcpool, ConstantPoolCache::field_entries_offset()));
  add(cache, cache, Array<ResolvedFieldEntry>::base_offset_in_bytes());
  lea(cache, Address(cache, index));
  // Prevents stale data from being read after the bytecode is patched to the fast bytecode.
  membar(MacroAssembler::LoadLoad);
}

void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
  // Get the index out of the bytecode pointer
  get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
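  // sizeof(ResolvedMethodEntry) need not be a power of two, so scale with a
  // multiply rather than the shift shortcut used for field entries above.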
  mov(cache, sizeof(ResolvedMethodEntry));
  mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)

  // Get the address of the method entries array
  ldr(cache, Address(rcpool, ConstantPoolCache::method_entries_offset()));
  add(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
  lea(cache, Address(cache, index));
}