/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/compiler_globals.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interp_masm_aarch64.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/constMethodFlags.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "oops/inlineKlass.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

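// Truncate the 32-bit integer result in `result` to the method's declared
// return type (read from ConstMethod::_result_type): mask for T_BOOLEAN,
// sign-extend for T_BYTE and T_SHORT, zero-extend for T_CHAR. T_INT results
// pass through unchanged.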
void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  cmpw(rscratch1, T_INT);
  br(Assembler::EQ, done);

  // mask integer result to narrower return type.
  cmpw(rscratch1, T_BOOLEAN);
  br(Assembler::NE, notBool);
  andw(result, result, 0x1);
  b(done);

  bind(notBool);
  cmpw(rscratch1, T_BYTE);
  br(Assembler::NE, notByte);
  sbfx(result, result, 0, 8);
  b(done);

  bind(notByte);
  cmpw(rscratch1, T_CHAR);
  br(Assembler::NE, notChar);
  ubfx(result, result, 0, 16);  // truncate upper 16 bits
  b(done);

  bind(notChar);
  sbfx(result, result, 0, 16);     // sign-extend short

  // Nothing to do for T_INT
  bind(done);
}

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  b(entry);
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed.  If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // This method is only called just after the call into the vm in
    // call_VM_base, so the arg registers are available.
    ldrw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
    tbz(rscratch1, exact_log2(JavaThread::popframe_pending_bit), L);
    tbnz(rscratch1, exact_log2(JavaThread::popframe_processing_bit), L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    br(r0);
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  ldr(r2, Address(rthread, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr(r2, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(r2, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(r2, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
    case atos: ldr(r0, oop_addr);
               str(zr, oop_addr);
               interp_verify_oop(r0, state);        break;
    case ltos: ldr(r0, val_addr);                   break;
    case btos:                                   // fall through
    case ztos:                                   // fall through
    case ctos:                                   // fall through
    case stos:                                   // fall through
    case itos: ldrw(r0, val_addr);                  break;
    case ftos: ldrs(v0, val_addr);                  break;
    case dtos: ldrd(v0, val_addr);                  break;
    case vtos: /* nothing to do */                  break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  movw(rscratch1, (int) ilgl);
  strw(rscratch1, tos_addr);
  strw(zr, val_addr);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    cbz(rscratch1, L); // if (thread->jvmti_thread_state() == nullptr) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_state_offset()));
    cmpw(rscratch1, JvmtiThreadState::earlyret_pending);
    br(Assembler::NE, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), rscratch1);
    br(r0);
    bind(L);
  }
}

void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
  Register reg,
  int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  ldrh(reg, Address(rbcp, bcp_offset));
  rev16(reg, reg);
}

void InterpreterMacroAssembler::get_dispatch() {
  uint64_t offset;
  adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
  // Use add() here after ADRP, rather than lea().
  // lea() does not generate anything if its offset is zero.
  // However, relocs expect to find either an ADD or a load/store
  // insn after an ADRP.  add() always generates an ADD insn, even
  // for add(Rn, Rn, 0).
  add(rdispatch, rdispatch, offset);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(rbcp, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    // assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    ldrw(index, Address(rbcp, bcp_offset));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
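    // eonw(index, index, zr) computes index EOR NOT(zr), i.e. ~index,
    // recovering the plain index from the encoded one.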
    eonw(index, index, zr);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(rbcp, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

// Return
// Rindex: index into constant pool
// Rcache: address of cache entry - ConstantPoolCache::base_offset()
//
// A caller must add ConstantPoolCache::base_offset() to Rcache to get
// the true address of the cache entry.
//
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                           Register index,
                                                           int bcp_offset,
                                                           size_t index_size) {
  assert_different_registers(cache, index);
  assert_different_registers(cache, rcpool);
  get_cache_index_at_bcp(index, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry
  // aarch64 already has the cache in rcpool so there is no need to
  // install it in cache. Instead we pre-add the indexed offset to
  // rcpool and return it in cache. All clients of this method need to
  // be modified accordingly.
  add(cache, rcpool, index, Assembler::LSL, 5);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register index,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  // We use a 32-bit load here since the layout of 64-bit words on
  // little-endian machines allows us to do that.
  // n.b. unlike x86 cache already includes the index offset
  lea(bytecode, Address(cache,
                         ConstantPoolCache::base_offset()
                         + ConstantPoolCacheEntry::indices_offset()));
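  // Acquire load of the indices word: if we observe a resolved bytecode
  // below, we also observe the entry fields published before it
  // (presumably by a release store in the resolution path).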
  ldarw(bytecode, bytecode);
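  // The indices word packs the constant pool index in its low 16 bits and
  // the two resolved bytecodes in the bytes above it; byte_no selects which
  // bytecode byte to extract.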
  const int shift_count = (1 + byte_no) * BitsPerByte;
  ubfx(bytecode, bytecode, shift_count, BitsPerByte);
}

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  assert(cache != tmp, "must use different register");
  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
  ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
  add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}

void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
                                                  Register t1, Register t2,
                                                  bool clear_fields, Label& alloc_failed) {
  MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  {
    SkipIfEqual skip_if(this, &DTraceAllocProbes, 0);
    // Trigger dtrace event for fastpath
    push(atos);
    call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
    pop(atos);
  }
}

void InterpreterMacroAssembler::read_flat_field(Register holder_klass,
                                                Register field_index, Register field_offset,
                                                Register temp, Register obj) {
  Label alloc_failed, empty_value, done;
  const Register src = field_offset;
  const Register alloc_temp = rscratch1;
  const Register dst_temp   = temp;
  assert_different_registers(obj, holder_klass, field_index, field_offset, dst_temp);

  // Grab the inline field klass
  push(holder_klass);
  const Register field_klass = holder_klass;
  get_inline_type_field_klass(holder_klass, field_index, field_klass);

  // check for empty value klass
  test_klass_is_empty_inline_type(field_klass, dst_temp, empty_value);

  // allocate buffer
  push(obj); // save holder
  allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);

  // Have an oop instance buffer, copy into it
  data_for_oop(obj, dst_temp, field_klass);
  pop(alloc_temp);             // restore holder
  lea(src, Address(alloc_temp, field_offset));
  // the copy below clobbers a few regs; save/restore the new obj
  push(obj);
  access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, field_klass);
  pop(obj);
  pop(holder_klass);
  b(done);

  bind(empty_value);
  get_empty_inline_type_oop(field_klass, dst_temp, obj);
  pop(holder_klass);
  b(done);

  bind(alloc_failed);
  pop(obj);
  pop(holder_klass);
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flat_field),
          obj, field_index, holder_klass);

  bind(done);

  // Ensure the stores to copy the inline field contents are visible
  // before any subsequent store that publishes this reference.
  membar(Assembler::StoreStore);
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
  resolve_oop_handle(result, tmp, rscratch2);
  // Add in the index
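  // (base_offset_in_bytes is pre-scaled down to heap-oop units here because
  // the load below scales the whole index back up by LogBytesPerHeapOop.)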
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
}

void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                             Register cpool, Register index, Register klass, Register temp) {
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}

void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
                                                              Register method,
                                                              Register cache) {
  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == TemplateTable::f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));

  ldr(method, Address(cache, method_offset)); // get f1 Method*
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype,
                                                  bool profile) {
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  if (profile) {
    profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
  }

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
}

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  str(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_i(Register r) {
  str(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_l(Register r) {
  str(zr, pre(esp, -wordSize));
  str(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::pop_f(FloatRegister r) {
  ldrs(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_d(FloatRegister r) {
  ldrd(r, post(esp, 2 * Interpreter::stackElementSize));
}

void InterpreterMacroAssembler::push_f(FloatRegister r) {
  strs(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_d(FloatRegister r) {
  strd(r, pre(esp, 2 * -wordSize));
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();                 break;
  case btos:
  case ztos:
  case ctos:
  case stos:
  case itos: pop_i();                   break;
  case ltos: pop_l();                   break;
  case ftos: pop_f();                   break;
  case dtos: pop_d();                   break;
  case vtos: /* nothing to do */        break;
  default:   ShouldNotReachHere();
  }
  interp_verify_oop(r0, state);
}

void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(r0, state);
  switch (state) {
  case atos: push_ptr();                break;
  case btos:
  case ztos:
  case ctos:
  case stos:
  case itos: push_i();                  break;
  case ltos: push_l();                  break;
  case ftos: push_f();                  break;
  case dtos: push_d();                  break;
  case vtos: /* nothing to do */        break;
  default  : ShouldNotReachHere();
  }
}

// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ldr(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  str(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::load_float(Address src) {
  ldrs(v0, src);
}

void InterpreterMacroAssembler::load_double(Address src) {
  ldrd(v0, src);
}

void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  mov(r19_sender_sp, sp);
  // record last_sp
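  // (last_sp is recorded relative to rfp, in stack-element units, rather
  // than as an absolute address.)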
  sub(rscratch1, esp, rfp);
  asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
  str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
}

// Jump to the from_interpreted entry of a call unless single stepping is
// possible in this thread, in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbzw(rscratch1, run_compiled_code);
    ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));
    br(rscratch1);
    bind(run_compiled_code);
  }

  ldr(rscratch1, Address(method, Method::from_interpreted_offset()));
  br(rscratch1);
}

// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.  amd64 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
    dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop,
                                              bool generate_poll) {
  if (VerifyActivationFrameSize) {
    Unimplemented();
  }
  if (verifyoop) {
    interp_verify_oop(r0, state);
  }

  Label safepoint;
  address* const safepoint_table = Interpreter::safept_table(state);
  bool needs_thread_local_poll = generate_poll && table != safepoint_table;

  if (needs_thread_local_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    ldr(rscratch2, Address(rthread, JavaThread::polling_word_offset()));
    tbnz(rscratch2, exact_log2(SafepointMechanism::poll_bit()), safepoint);
  }

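  // rscratch1 holds the zero-extended bytecode (loaded by our callers);
  // each dispatch table entry is an 8-byte address, hence the uxtw(3)
  // scaling below.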
  if (table == Interpreter::dispatch_table(state)) {
    addw(rscratch2, rscratch1, Interpreter::distance_from_dispatch_table(state));
    ldr(rscratch2, Address(rdispatch, rscratch2, Address::uxtw(3)));
  } else {
    mov(rscratch2, (address)table);
    ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
  }
  br(rscratch2);

  if (needs_thread_local_poll) {
    bind(safepoint);
    lea(rscratch2, ExternalAddress((address)safepoint_table));
    ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
    br(rscratch2);
  }
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode
  ldrb(rscratch1, Address(pre(rbcp, step)));
  dispatch_base(state, Interpreter::dispatch_table(state), /*verifyoop*/true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  ldrb(rscratch1, Address(rbcp, 0));
  dispatch_base(state, table);
}

// remove activation
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: Registers r3 and v0 may be in use for the
  // result check if synchronized method
  Label unlocked, unlock, no_unlock;

  // The poll below is for the stack watermark barrier. It allows frames that
  // would normally not be safe to use to be fixed up lazily. Bad returns into
  // unsafe territory of the stack will call InterpreterRuntime::at_unwind.
  Label slow_path;
  Label fast_path;
  safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
  br(Assembler::AL, fast_path);
  bind(slow_path);
  push(state);
  set_last_Java_frame(esp, rfp, (address)pc(), rscratch1);
  super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
  reset_last_Java_frame(true);
  pop(state);
  bind(fast_path);

  // get the value of _do_not_unlock_if_synchronized into r3
  const Address do_not_unlock_if_synchronized(rthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  ldrb(r3, do_not_unlock_if_synchronized);
  strb(zr, do_not_unlock_if_synchronized); // reset the flag

  // get method access flags
  ldr(r1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  ldr(r2, Address(r1, Method::access_flags_offset()));
  tbz(r2, exact_log2(JVM_ACC_SYNCHRONIZED), unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cbnz(r3, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rfp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1 so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(c_rarg1, monitor); // address of first monitor

  ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset()));
  cbnz(r0, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception.  Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    b(unlocked);
  }

  bind(unlock);
  unlock_object(c_rarg1);
  pop(state);

  // Check for block-structured locking (i.e., that all locked
  // objects have been unlocked)
  bind(unlocked);

  // r0: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
    const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    ldr(c_rarg1, monitor_block_top); // points to current entry, starting
                                     // with top-most entry
    lea(r19, monitor_block_bot);  // points to word before bottom of
                                  // monitor block
    b(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                   throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.

      push(state);
      unlock_object(c_rarg1);
      pop(state);

      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }

      b(restart);
    }

    bind(loop);
    // check if current entry is used
    ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset()));
    cbnz(rscratch1, exception);

    add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
    bind(entry);
    cmp(c_rarg1, r19); // check if bottom reached
    br(Assembler::NE, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender esp
  ldr(rscratch2,
      Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));

  if (StackReservedPages > 0) {
    // testing if reserved zone needs to be re-enabled
    Label no_reserved_zone_enabling;

    // check if already enabled - if so no re-enabling needed
    assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
    ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
    cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
    br(Assembler::EQ, no_reserved_zone_enabling);

    // look for an overflow into the stack reserved zone, i.e.
    // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
    ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    cmp(rscratch2, rscratch1);
    br(Assembler::LS, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }

  if (state == atos && InlineTypeReturnedAsFields) {
    // Check if we are returning a non-null inline type and load its fields into registers
    Label skip;
    test_oop_is_not_inline_type(r0, rscratch2, skip);

    // Load fields from a buffered value with an inline class specific handler
    load_klass(rscratch1 /*dst*/, r0 /*src*/);
    ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
    ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
    // Unpack handler can be null if inline type is not scalarizable in returns
    cbz(rscratch1, skip);

    blr(rscratch1);
#ifdef ASSERT
    // TODO 8284443 Enable
    if (StressCallingConvention && false) {
      Label skip_stress;
      ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
      ldrw(rscratch1, Address(rscratch1, Method::flags_offset()));
      tstw(rscratch1, ConstMethodFlags::has_scalarized_return_flag());
      br(Assembler::EQ, skip_stress);
      load_klass(r0, r0);
      orr(r0, r0, 1);
      bind(skip_stress);
    }
#endif
    bind(skip);
    // Check above kills sender esp in rscratch2. Reload it.
    ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
  }

  // restore sender esp
  mov(esp, rscratch2);
  // remove frame anchor
  leave();
  // If we're returning to interpreted code we will shortly be
  // adjusting SP to allow some space for ESP.  If we're returning to
  // compiled code the saved sender SP was saved in sender_sp, so this
  // restores it.
  andr(sp, esp, -16);
}

// Lock object
//
// Args:
//      c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::lock_object(Register lock_reg)
{
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
  if (LockingMode == LM_MONITOR) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label count, done;

    const Register swap_reg = r0;
    const Register tmp = c_rarg2;
    const Register obj_reg = c_rarg3; // Will contain the oop

    const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
    const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg %c_rarg3
    ldr(obj_reg, Address(lock_reg, obj_offset));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp, obj_reg);
      ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
      tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
      br(Assembler::NE, slow_case);
    }

    if (LockingMode == LM_LIGHTWEIGHT) {
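      // Lightweight-locking fast path: attempt to lock via the mark word and
      // the thread's lock-stack; fall through to slow_case on failure.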
      ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      fast_lock(obj_reg, tmp, rscratch1, rscratch2, slow_case);
      b(count);
    } else if (LockingMode == LM_LEGACY) {
      // Load (object->mark() | 1) into swap_reg
      ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      orr(swap_reg, rscratch1, 1);
      if (EnableValhalla) {
        // Mask inline_type bit such that we go to the slow path if object is an inline type
        andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
      }

      // Save (object->mark() | 1) into BasicLock's displaced header
      str(swap_reg, Address(lock_reg, mark_offset));

      assert(lock_offset == 0,
             "displaced header must be first word in BasicObjectLock");

      Label fail;
      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);

      // Fast check for recursive lock.
      //
      // Can apply the optimization only if this is a stack lock
      // allocated in this thread. For efficiency, we can focus on
      // recently allocated stack locks (instead of reading the stack
      // base and checking whether 'mark' points inside the current
      // thread stack):
      //  1) (mark & 7) == 0, and
      //  2) sp <= mark < mark + os::pagesize()
      //
      // Warning: sp + os::pagesize can overflow the stack base. We must
      // neither apply the optimization for an inflated lock allocated
      // just above the thread stack (this is why condition 1 matters)
      // nor apply the optimization if the stack lock is inside the stack
      // of another thread. The latter is avoided even in case of overflow
      // because we have guard pages at the end of all stacks. Hence, if
      // we go over the stack base and hit the stack of another thread,
      // this should not be in a writeable area that could contain a
      // stack lock allocated by that thread. As a consequence, a stack
      // lock less than page size away from sp is guaranteed to be
      // owned by the current thread.
      //
      // These 3 tests can be done by evaluating the following
      // expression: ((mark - sp) & (7 - os::vm_page_size())),
      // assuming both stack pointer and pagesize have their
      // least significant 3 bits clear.
      // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
      // NOTE2: aarch64 does not like to subtract sp from rn so take a
      // copy
      mov(rscratch1, sp);
      sub(swap_reg, swap_reg, rscratch1);
      ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));

      // Save the test result, for recursive case, the result is zero
      str(swap_reg, Address(lock_reg, mark_offset));
      br(Assembler::EQ, count);
    }
    bind(slow_case);

    // Call the runtime routine for slow case
    if (LockingMode == LM_LIGHTWEIGHT) {
      call_VM(noreg,
              CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
              obj_reg);
    } else {
      call_VM(noreg,
              CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
              lock_reg);
    }
    b(done);

    bind(count);
    increment(Address(rthread, JavaThread::held_monitor_count_offset()));

    bind(done);
  }
}


// Unlocks an object. Used in monitorexit bytecode and
// remove_activation.  Throws an IllegalMonitorStateException if object is
// not locked by current thread.
//
// Args:
//      c_rarg1: BasicObjectLock for lock
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::unlock_object(Register lock_reg)
{
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");

  if (LockingMode == LM_MONITOR) {
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Label count, done;

    const Register swap_reg   = r0;
    const Register header_reg = c_rarg2;  // Will contain the old oopMark
    const Register obj_reg    = c_rarg3;  // Will contain the oop

    save_bcp(); // Save in case of exception

    if (LockingMode != LM_LIGHTWEIGHT) {
      // Convert from BasicObjectLock structure to object and BasicLock
      // structure. Store the BasicLock address into %r0
      lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
    }

    // Load oop into obj_reg(%c_rarg3)
    ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));

    // Free entry
    str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));

    if (LockingMode == LM_LIGHTWEIGHT) {
      Label slow_case;

      // Check for non-symmetric locking. This is allowed by the spec and the interpreter
      // must handle it.
      Register tmp = rscratch1;
      // First check for lock-stack underflow.
      ldrw(tmp, Address(rthread, JavaThread::lock_stack_top_offset()));
      cmpw(tmp, (unsigned)LockStack::start_offset());
      br(Assembler::LE, slow_case);
      // Then check if the top of the lock-stack matches the unlocked object.
      subw(tmp, tmp, oopSize);
      ldr(tmp, Address(rthread, tmp));
      cmpoop(tmp, obj_reg);
      br(Assembler::NE, slow_case);

      ldr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      tbnz(header_reg, exact_log2(markWord::monitor_value), slow_case);
      fast_unlock(obj_reg, header_reg, swap_reg, rscratch1, slow_case);
      b(count);
      bind(slow_case);
    } else if (LockingMode == LM_LEGACY) {
      // Load the old header from BasicLock structure
      ldr(header_reg, Address(swap_reg,
                              BasicLock::displaced_header_offset_in_bytes()));

      // Test for recursion
      cbz(header_reg, count);

      // Atomic swap back the old header
      cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
    }
    // Call the runtime routine for slow case.
    str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
    b(done);

    bind(count);
    decrement(Address(rthread, JavaThread::held_monitor_count_offset()));

    bind(done);
    restore_bcp();
  }
}

void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
  cbz(mdp, zero_continue);
}

// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;
  stp(r0, r1, Address(pre(sp, -2 * wordSize)));

  // Test MDO to avoid the call if it is null.
  ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));
  cbz(r0, set_mdp);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rmethod, rbcp);
  // r0: mdi
  // mdo is guaranteed to be non-zero here, we checked for it before the call.
  ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
  lea(r1, Address(r1, in_bytes(MethodData::data_offset())));
  add(r0, r1, r0);
  str(r0, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
  bind(set_mdp);
  ldp(r0, r1, Address(post(sp, 2 * wordSize)));
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  stp(r0, r1, Address(pre(sp, -2 * wordSize)));
  stp(r2, r3, Address(pre(sp, -2 * wordSize)));
  test_method_data_pointer(r3, verify_continue); // If mdp is zero, continue
  get_method(r1);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  ldrsh(r2, Address(r3, in_bytes(DataLayout::bci_offset())));
  ldr(rscratch1, Address(r1, Method::const_offset()));
  add(r2, r2, rscratch1, Assembler::LSL);
  lea(r2, Address(r2, ConstMethod::codes_offset()));
  cmp(r2, rbcp);
  br(Assembler::EQ, verify_continue);
  // r1: method
  // rbcp: bcp // rbcp == 22
  // r3: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               r1, rbcp, r3);
  bind(verify_continue);
  ldp(r2, r3, Address(post(sp, 2 * wordSize)));
  ldp(r0, r1, Address(post(sp, 2 * wordSize)));
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  str(value, data);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  increment_mdp_data_at(mdp_in, noreg, constant, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64-bit counters; at best it is wasting space,
  // at worst it is a rare bug when counters overflow

  assert_different_registers(rscratch2, rscratch1, mdp_in, reg);

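  // Use the constant-offset form when no index register is supplied;
  // otherwise materialize mdp_in + constant in rscratch2 and index it by reg.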
  Address addr1(mdp_in, constant);
  Address addr2(rscratch2, reg, Address::lsl(0));
  Address &addr = addr1;
  if (reg != noreg) {
    lea(rscratch2, addr1);
    addr = addr2;
  }

  if (decrement) {
    // Decrement the register.  Set condition codes.
    // Intel does this
    // addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    // Label L;
    // jcc(Assembler::negative, L);
    // addptr(data, (int32_t) DataLayout::counter_increment);
    // so we do this
    ldr(rscratch1, addr);
    subs(rscratch1, rscratch1, (unsigned)DataLayout::counter_increment);
    Label L;
    br(Assembler::LO, L);       // skip store if counter underflow
    str(rscratch1, addr);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Intel does this
    // Increment the register.  Set carry flag.
    // addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    // sbbptr(data, (int32_t)0);
    // so we do this
    ldr(rscratch1, addr);
    adds(rscratch1, rscratch1, DataLayout::counter_increment);
    Label L;
    br(Assembler::CS, L);       // skip store if counter overflow
    str(rscratch1, addr);
    bind(L);
  }
}

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int flags_offset = in_bytes(DataLayout::flags_offset());
  // Set the flag
  ldrb(rscratch1, Address(mdp_in, flags_offset));
  orr(rscratch1, rscratch1, flag_byte_constant);
  strb(rscratch1, Address(mdp_in, flags_offset));
}


void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    ldr(rscratch1, Address(mdp_in, offset));
    cmp(value, rscratch1);
  } else {
    // Put the test value into a register, so caller can use it:
    ldr(test_value_out, Address(mdp_in, offset));
    cmp(value, test_value_out);
  }
  br(Assembler::NE, not_equal_continue);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ldr(rscratch1, Address(mdp_in, offset_of_disp));
  add(mdp_in, mdp_in, rscratch1, LSL);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  lea(rscratch1, Address(mdp_in, offset_of_disp));
  ldr(rscratch1, Address(rscratch1, reg, Address::lsl(0)));
  add(mdp_in, mdp_in, rscratch1, LSL);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(mdp_in, mdp_in, (unsigned)constant);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // save/restore across call_VM
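  // (zr is stored alongside return_bci only to keep sp 16-byte aligned.)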
1214   stp(zr, return_bci, Address(pre(sp, -2 * wordSize)));
1215   call_VM(noreg,
1216           CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
1217           return_bci);
1218   ldp(zr, return_bci, Address(post(sp, 2 * wordSize)));
1219 }
1220 
1221 
1222 void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
1223                                                      Register bumped_count) {
1224   if (ProfileInterpreter) {
1225     Label profile_continue;
1226 
1227     // If no method data exists, go to profile_continue.
1228     // Otherwise, assign to mdp
1229     test_method_data_pointer(mdp, profile_continue);
1230 
1231     // We are taking a branch.  Increment the taken count.
1232     // We inline increment_mdp_data_at to return bumped_count in a register
1233     //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1234     Address data(mdp, in_bytes(JumpData::taken_offset()));
1235     ldr(bumped_count, data);
1236     assert(DataLayout::counter_increment == 1,
1237             "flow-free idiom only works with 1");
1238     // Intel does this to catch overflow
1239     // addptr(bumped_count, DataLayout::counter_increment);
1240     // sbbptr(bumped_count, 0);
1241     // so we do this
1242     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1243     Label L;
1244     br(Assembler::CS, L);       // skip store if counter overflow
1245     str(bumped_count, data);
1246     bind(L);
1247     // The method data pointer needs to be updated to reflect the new target.
1248     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1249     bind(profile_continue);
1250   }
1251 }
1252 
1253 
1254 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
1255   if (ProfileInterpreter) {
1256     Label profile_continue;
1257 
1258     // If no method data exists, go to profile_continue.
1259     test_method_data_pointer(mdp, profile_continue);
1260 
1261     // We are taking a branch.  Increment the not taken count.
1262     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1263 
1264     // The method data pointer needs to be updated to correspond to
1265     // the next bytecode
1266     update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
1267     bind(profile_continue);
1268   }
1269 }
1270 
1271 
1272 void InterpreterMacroAssembler::profile_call(Register mdp) {
1273   if (ProfileInterpreter) {
1274     Label profile_continue;
1275 
1276     // If no method data exists, go to profile_continue.
1277     test_method_data_pointer(mdp, profile_continue);
1278 
1279     // We are making a call.  Increment the count.
1280     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1281 
1282     // The method data pointer needs to be updated to reflect the new target.
1283     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1284     bind(profile_continue);
1285   }
1286 }
1287 
1288 void InterpreterMacroAssembler::profile_final_call(Register mdp) {
1289   if (ProfileInterpreter) {
1290     Label profile_continue;
1291 
1292     // If no method data exists, go to profile_continue.
1293     test_method_data_pointer(mdp, profile_continue);
1294 
1295     // We are making a call.  Increment the count.
1296     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1297 
1298     // The method data pointer needs to be updated to reflect the new target.
1299     update_mdp_by_constant(mdp,
1300                            in_bytes(VirtualCallData::
1301                                     virtual_call_data_size()));
1302     bind(profile_continue);
1303   }
1304 }
1305 
1306 
1307 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1308                                                      Register mdp,
1309                                                      Register reg2,
1310                                                      bool receiver_can_be_null) {
1311   if (ProfileInterpreter) {
1312     Label profile_continue;
1313 
1314     // If no method data exists, go to profile_continue.
1315     test_method_data_pointer(mdp, profile_continue);
1316 
    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      cbnz(receiver, not_null);
      // We are making a call.  Increment the count for the null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      b(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows.  At the same time, it remembers
// the location of the first empty row.  (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree.  Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
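//
// Conceptually, each profile row is a (receiver klass, count) pair; as a
// purely illustrative C sketch (not the real MDO layout, which is flat
// cells addressed via the VirtualCallData offset functions):
//   struct ReceiverRow { Klass* receiver; intptr_t count; };
//   ReceiverRow rows[TypeProfileWidth];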
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done) {
  if (TypeProfileWidth == 0) {
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
  } else {
    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
        &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
                                        Register reg2, int start_row, Label& done, int total_rows,
                                        OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(mdp, item_offset, item,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the item from the CallData.)

    // The item is item[n].  Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(mdp, count_offset);
    b(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on item[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        cbz(reg2, found_null);
        // Item did not match any saved item and there is no empty row for it.
        // Increment total counter to indicate polymorphic case.
        increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
        b(done);
        bind(found_null);
        break;
      }
      // Since null is rare, make it the branch-taken case.
      cbz(reg2, found_null);

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
        item_offset_fn, item_count_offset_fn);

      // Found a null.  Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed that item[start_row] is null.

  // Fill in the item field and increment the count.
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(mdp, item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  mov(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    b(done);
  }
}

// Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != nullptr) {
//     // inner copy of decision tree, rooted at row[1]
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != nullptr) {
//       // degenerate decision tree, rooted at row[2]
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
//   done:

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);

  bind(done);
}

void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (uint row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      b(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}
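
// In effect, profile_ret generates (a C-like sketch; row_limit is
// RetData::row_limit()):
//   count++;
//   for (row = 0; row < row_limit; row++)
//     if (bci[row] == return_bci) { bci_count[row]++; mdp += bci_displacement[row]; goto done; }
//   update_mdp_for_ret(return_bci);  // no cached row matched: fall back to the runtime
//   done: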

void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2);
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::
                                  default_displacement_offset()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Compute the byte offset of this case's cells:
    // (index * per_case_size()) + case_array_offset()
    movw(reg2, in_bytes(MultiBranchData::per_case_size()));
    movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
    Assembler::maddw(index, index, reg2, rscratch1);
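    // index now holds case_array_offset() + index * per_case_size(), the
    // byte offset of this case's count and displacement cells in the MDO.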

    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_array(Register mdp,
                                              Register array,
                                              Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, array);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::array_offset())));

    Label not_flat;
    test_non_flat_array_oop(array, tmp, not_flat);

    set_mdp_flag_at(mdp, ArrayLoadStoreData::flat_array_byte_constant());

    bind(not_flat);

    Label not_null_free;
    test_non_null_free_array_oop(array, tmp, not_null_free);

    set_mdp_flag_at(mdp, ArrayLoadStoreData::null_free_array_byte_constant());

    bind(not_null_free);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_element(Register mdp,
                                                Register element,
                                                Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, element);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::element_offset())));

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp, in_bytes(ArrayLoadStoreData::array_load_store_data_size()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_acmp(Register mdp,
                                             Register left,
                                             Register right,
                                             Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, left);
    profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));

    Label left_not_inline_type;
    test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
    set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
    bind(left_not_inline_type);

    mov(tmp, right);
    profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));

    Label right_not_inline_type;
    test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
    set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
    bind(right_not_inline_type);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
  if (state == atos) {
    MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }


void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit
  // events are sent for that thread to track stack depth.  If it is
  // possible to enter interp_only_mode we add the code to check if the
  // event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbzw(r3, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 rthread, c_rarg1);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    get_method(c_rarg1);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, c_rarg1);
  }

}


void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit
  // events are sent for that thread to track stack depth.  If it is
  // possible to enter interp_only_mode we add the code to check if the
  // event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // The template interpreter will leave the result on the top of the stack.
    push(state);
    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbz(r3, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    pop(state);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    push(state);
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 rthread, c_rarg1);
    pop(state);
  }
}


// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask,
                                                        Register scratch, Register scratch2,
                                                        bool preloaded, Condition cond,
                                                        Label* where) {
  if (!preloaded) {
    ldrw(scratch, counter_addr);
  }
  add(scratch, scratch, increment);
  strw(scratch, counter_addr);
  ldrw(scratch2, mask);
  ands(scratch, scratch, scratch2);
  br(cond, *where);
}
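
// Roughly, in C-like pseudocode (a sketch; counter and mask are 32-bit
// cells, and <cond> is the flag condition tested against the AND result):
//   *counter_addr += increment;
//   if ((*counter_addr & mask) <cond> 0) goto where;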

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore rbcp & rlocals pointer since these
  //       are callee-saved registers and no blocking/GC can happen
  //       in leaf calls.
#ifdef ASSERT
  {
    Label L;
    ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
    cbz(rscratch1, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != nullptr");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address  entry_point,
                                             int      number_of_arguments,
                                             bool     check_exceptions) {
  // interpreter specific
  //
  // Note: Could avoid restoring the locals pointer (callee-saved), but it
  //       doesn't really make a difference for these runtime calls, since
  //       they are slow anyway. Note that bcp must be saved/restored since
  //       it may change due to GC.
  // assert(java_thread == noreg, "not expecting a precomputed java thread");
  save_bcp();
#ifdef ASSERT
  {
    Label L;
    ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
    cbz(rscratch1, L);
    stop("InterpreterMacroAssembler::call_VM_base:"
         " last_sp != nullptr");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
                               entry_point, number_of_arguments,
                               check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  assert_different_registers(obj, rscratch1);
  Label update, next, none;

  verify_oop(obj);

  cbnz(obj, update);
  orptr(mdo_addr, TypeEntries::null_seen);
  b(next);

  bind(update);
  load_klass(obj, obj);

  ldr(rscratch1, mdo_addr);
  eor(obj, obj, rscratch1);
  tst(obj, TypeEntries::type_klass_mask);
  br(Assembler::EQ, next); // klass seen before, nothing to
                           // do. The unknown bit may have been
                           // set already but no need to check.

  tbnz(obj, exact_log2(TypeEntries::type_unknown), next);
  // already unknown. Nothing to do anymore.

  ldr(rscratch1, mdo_addr);
  cbz(rscratch1, none);
  cmp(rscratch1, (u1)TypeEntries::null_seen);
  br(Assembler::EQ, none);
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just stored this
  // obj's klass in the profile.
  ldr(rscratch1, mdo_addr);
  eor(obj, obj, rscratch1);
  tst(obj, TypeEntries::type_klass_mask);
  br(Assembler::EQ, next);

  // Different from before: we cannot keep an accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown);
  b(next);

  bind(none);
  // first time here. Set profile type.
  str(obj, mdo_addr);

  bind(next);
}
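
// A C-like sketch of the update above (the single MDO cell packs klass
// pointer bits with the TypeEntries::null_seen/type_unknown flag bits):
//   if (obj == nullptr)                               cell |= null_seen;
//   else if (((cell ^ klass) & type_klass_mask) == 0) ;  // same klass: nothing to do
//   else if (cell & type_unknown)                     ;  // already polymorphic
//   else if (cell == 0 || cell == null_seen)          cell = klass;          // first klass seen
//   else                                              cell |= type_unknown;  // conflict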

void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    ldrb(rscratch1, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
    cmp(rscratch1, u1(is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag));
    br(Assembler::NE, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If the return value type is profiled, we may have no argument left to profile
          ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
          sub(tmp, tmp, i*TypeStackSlotEntries::per_arg_count());
          cmp(tmp, (u1)TypeStackSlotEntries::per_arg_count());
          add(rscratch1, mdp, off_to_args);
          br(Assembler::LT, done);
        }
        ldr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // A stack offset o (zero-based) from the start of the argument
        // list translates, for n arguments, into offset n - o - 1 from
        // the end of the argument list
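        // e.g. with n = 3 arguments, slot o = 0 (the first declared
        // argument) maps to offset 3 - 0 - 1 = 2 from the end.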
        ldr(rscratch1, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))));
        sub(tmp, tmp, rscratch1);
        sub(tmp, tmp, 1);
        Address arg_addr = argument_address(tmp);
        ldr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i)));
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
        sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      add(rscratch1, mdp, off_to_args);
      bind(done);
      mov(mdp, rscratch1);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end; non-zero
        // if there's a return value to profile.
        assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
      }
      str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, rbcp);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      Label do_profile;
      ldrb(rscratch1, Address(rbcp, 0));
      cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
      br(Assembler::EQ, do_profile);
      cmp(rscratch1, (u1)Bytecodes::_invokehandle);
      br(Assembler::EQ, do_profile);
      get_method(tmp);
      ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
      subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
      br(Assembler::NE, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative, we're not profiling any parameters.
    ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set

    // Compute a pointer to the parameter area from the offset and
    // move the pointer to the slot for the last parameter. Collect
    // profiling from the last parameter down.
    // mdo start + parameters offset + array length - 1
    add(mdp, mdp, tmp1);
    ldr(tmp1, Address(mdp, ArrayData::array_len_offset()));
    sub(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    int per_arg_scale = exact_log2(DataLayout::cell_size);
    add(rscratch1, mdp, off_base);
    add(rscratch2, mdp, type_base);

    Address arg_off(rscratch1, tmp1, Address::lsl(per_arg_scale));
    Address arg_type(rscratch2, tmp1, Address::lsl(per_arg_scale));

    // load offset on the stack from the slot for this parameter
    ldr(tmp2, arg_off);
    neg(tmp2, tmp2);
    // read the parameter from the local area
    ldr(tmp2, Address(rlocals, tmp2, Address::lsl(Interpreter::logStackElementSize)));

    // profile the parameter
    profile_obj_type(tmp2, arg_type);

    // go to next parameter
    subs(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());
    br(Assembler::GE, loop);

    bind(profile_continue);
  }
}
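
// In effect (a sketch), walking from the last parameter down:
//   for (i = num_params - 1; i >= 0; i--) {
//     slot = stack_slot[i];              // entry in the ParametersTypeData
//     obj  = rlocals[-slot];             // read the parameter from the locals
//     profile_obj_type(obj, type[i]);
//   }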

void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
  // Get index out of bytecode pointer
  get_cache_index_at_bcp(index, 1, sizeof(u4));
  // Get address of invokedynamic array
  ldr(cache, Address(rcpool, in_bytes(ConstantPoolCache::invokedynamic_entries_offset())));
  // Scale the index to be the entry index * sizeof(ResolvedIndyEntry)
  lsl(index, index, log2i_exact(sizeof(ResolvedIndyEntry)));
  add(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
  lea(cache, Address(cache, index));
}
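
// The computed address is, in effect:
//   cache = *(rcpool + invokedynamic_entries_offset())
//           + Array<ResolvedIndyEntry>::base_offset_in_bytes()
//           + index * sizeof(ResolvedIndyEntry)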

void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
  // Get index out of bytecode pointer
  get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
  // Take shortcut if the size is a power of 2
  if (is_power_of_2(sizeof(ResolvedFieldEntry))) {
    lsl(index, index, log2i_exact(sizeof(ResolvedFieldEntry))); // Scale index by power of 2
  } else {
    mov(cache, sizeof(ResolvedFieldEntry));
    mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedFieldEntry)
  }
  // Get address of field entries array
  ldr(cache, Address(rcpool, ConstantPoolCache::field_entries_offset()));
  add(cache, cache, Array<ResolvedFieldEntry>::base_offset_in_bytes());
  lea(cache, Address(cache, index));
}
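
// As above, the result is, in effect:
//   cache = *(rcpool + field_entries_offset())
//           + Array<ResolvedFieldEntry>::base_offset_in_bytes()
//           + index * sizeof(ResolvedFieldEntry)
// with the multiply strength-reduced to a shift when
// sizeof(ResolvedFieldEntry) is a power of two.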