/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/compiler_globals.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interp_masm_aarch64.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "oops/inlineKlass.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/powerOfTwo.hpp"

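// Truncate or sign-extend the 32-bit result in `result` to the method's
// declared sub-int return type (boolean, byte, char or short).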
void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  cmpw(rscratch1, T_INT);
  br(Assembler::EQ, done);

  // mask integer result to narrower return type.
  cmpw(rscratch1, T_BOOLEAN);
  br(Assembler::NE, notBool);
  andw(result, result, 0x1);
  b(done);

  bind(notBool);
  cmpw(rscratch1, T_BYTE);
  br(Assembler::NE, notByte);
  sbfx(result, result, 0, 8);
  b(done);

  bind(notByte);
  cmpw(rscratch1, T_CHAR);
  br(Assembler::NE, notChar);
  ubfx(result, result, 0, 16);  // truncate upper 16 bits
  b(done);

  bind(notChar);
  sbfx(result, result, 0, 16);     // sign-extend short

  // Nothing to do for T_INT
  bind(done);
}

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  b(entry);
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed.  If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // This method is only called just after the call into the vm in
    // call_VM_base, so the arg registers are available.
    ldrw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
    tbz(rscratch1, exact_log2(JavaThread::popframe_pending_bit), L);
    tbnz(rscratch1, exact_log2(JavaThread::popframe_processing_bit), L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    br(r0);
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  ldr(r2, Address(rthread, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr(r2, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(r2, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(r2, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
    case atos: ldr(r0, oop_addr);
               str(zr, oop_addr);
               verify_oop(r0, state);               break;
    case ltos: ldr(r0, val_addr);                   break;
    case btos:                                   // fall through
    case ztos:                                   // fall through
    case ctos:                                   // fall through
    case stos:                                   // fall through
    case itos: ldrw(r0, val_addr);                  break;
    case ftos: ldrs(v0, val_addr);                  break;
    case dtos: ldrd(v0, val_addr);                  break;
    case vtos: /* nothing to do */                  break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  movw(rscratch1, (int) ilgl);
  strw(rscratch1, tos_addr);
  strw(zr, val_addr);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    cbz(rscratch1, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_state_offset()));
    cmpw(rscratch1, JvmtiThreadState::earlyret_pending);
    br(Assembler::NE, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), rscratch1);
    br(r0);
    bind(L);
  }
}

void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
  Register reg,
  int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  ldrh(reg, Address(rbcp, bcp_offset));
  rev16(reg, reg);
}

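// Materialize the address of the interpreter dispatch table in rdispatch:
// adrp resolves the enclosing 4K page, lea adds the offset within the page.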
void InterpreterMacroAssembler::get_dispatch() {
  uint64_t offset;
  adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
  lea(rdispatch, Address(rdispatch, offset));
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(rbcp, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    // assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    ldrw(index, Address(rbcp, bcp_offset));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    eonw(index, index, zr);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(rbcp, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

// Return
// Rindex: index into constant pool
// Rcache: address of cache entry - ConstantPoolCache::base_offset()
//
// A caller must add ConstantPoolCache::base_offset() to Rcache to get
// the true address of the cache entry.
//
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                           Register index,
                                                           int bcp_offset,
                                                           size_t index_size) {
  assert_different_registers(cache, index);
  assert_different_registers(cache, rcpool);
  get_cache_index_at_bcp(index, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry
  // aarch64 already has the cache in rcpool so there is no need to
  // install it in cache. instead we pre-add the indexed offset to
  // rcpool and return it in cache. All clients of this method need to
  // be modified accordingly.
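  // Each entry is 4 words (32 bytes), so scaling the index by LSL 5 converts
  // it into a byte offset from rcpool.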
  add(cache, rcpool, index, Assembler::LSL, 5);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register index,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  // We use a 32-bit load here since the layout of 64-bit words on
  // little-endian machines allows us to do so.
  // n.b. unlike x86 cache already includes the index offset
  lea(bytecode, Address(cache,
                         ConstantPoolCache::base_offset()
                         + ConstantPoolCacheEntry::indices_offset()));
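  // Load-acquire: if another thread has just resolved this entry, make sure we
  // also observe its writes to the rest of the entry.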
  ldarw(bytecode, bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  ubfx(bytecode, bytecode, shift_count, BitsPerByte);
}

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  assert(cache != tmp, "must use different register");
  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
  ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
  add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}

void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
                                                  Register t1, Register t2,
                                                  bool clear_fields, Label& alloc_failed) {
  MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  {
    SkipIfEqual skip_if(this, &DTraceAllocProbes, 0);
    // Trigger dtrace event for fastpath
    push(atos);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), new_obj);
    pop(atos);
  }
}

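// Load the value of a flattened (inlined) field of obj. Unless the field's
// klass is empty, a fresh instance is buffered on the heap and the field
// contents are copied into it; on allocation failure we fall back to the
// runtime.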
void InterpreterMacroAssembler::read_inlined_field(Register holder_klass,
                                                   Register field_index, Register field_offset,
                                                   Register temp, Register obj) {
  Label alloc_failed, empty_value, done;
  const Register src = field_offset;
  const Register alloc_temp = rscratch1;
  const Register dst_temp   = temp;
  assert_different_registers(obj, holder_klass, field_index, field_offset, dst_temp);

  // Grab the inline field klass
  push(holder_klass);
  const Register field_klass = holder_klass;
  get_inline_type_field_klass(holder_klass, field_index, field_klass);

  // check for empty value klass
  test_klass_is_empty_inline_type(field_klass, dst_temp, empty_value);

  // allocate buffer
  push(obj); // save holder
  allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);

  // Have an oop instance buffer, copy into it
  data_for_oop(obj, dst_temp, field_klass);
  pop(alloc_temp);             // restore holder
  lea(src, Address(alloc_temp, field_offset));
  // call_VM_leaf, clobbers a few regs, save restore new obj
  push(obj);
  access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, field_klass);
  pop(obj);
  pop(holder_klass);
  b(done);

  bind(empty_value);
  get_empty_inline_type_oop(field_klass, dst_temp, obj);
  pop(holder_klass);
  b(done);

  bind(alloc_failed);
  pop(obj);
  pop(holder_klass);
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_inlined_field),
          obj, field_index, holder_klass);

  bind(done);

  // Ensure the stores to copy the inline field contents are visible
  // before any subsequent store that publishes this reference.
  membar(Assembler::StoreStore);
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  resolve_oop_handle(result, tmp);
  // Add in the index
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)));
}

void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                             Register cpool, Register index, Register klass, Register temp) {
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}

void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
                                                              Register method,
                                                              Register cache) {
  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == TemplateTable::f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));

  ldr(method, Address(cache, method_offset)); // get f1 Method*
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype,
                                                  bool profile) {
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  if (profile) {
    profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
  }

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2

  // Profile the failure of the check.
  if (profile) {
    profile_typecheck_failed(r2); // blows r2
  }
}

// Java Expression Stack
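// n.b. esp is the interpreter's expression stack pointer register, distinct
// from the machine sp; values are pushed and popped in word-size slots.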

void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  str(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_i(Register r) {
  str(r, pre(esp, -wordSize));
}

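// Longs occupy two expression stack slots; the unused high slot is zeroed.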
void InterpreterMacroAssembler::push_l(Register r) {
  str(zr, pre(esp, -wordSize));
  str(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::pop_f(FloatRegister r) {
  ldrs(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_d(FloatRegister r) {
  ldrd(r, post(esp, 2 * Interpreter::stackElementSize));
}

void InterpreterMacroAssembler::push_f(FloatRegister r) {
  strs(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_d(FloatRegister r) {
  strd(r, pre(esp, 2 * -wordSize));
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();                 break;
  case btos:
  case ztos:
  case ctos:
  case stos:
  case itos: pop_i();                   break;
  case ltos: pop_l();                   break;
  case ftos: pop_f();                   break;
  case dtos: pop_d();                   break;
  case vtos: /* nothing to do */        break;
  default:   ShouldNotReachHere();
  }
  verify_oop(r0, state);
}

void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(r0, state);
  switch (state) {
  case atos: push_ptr();                break;
  case btos:
  case ztos:
  case ctos:
  case stos:
  case itos: push_i();                  break;
  case ltos: push_l();                  break;
  case ftos: push_f();                  break;
  case dtos: push_d();                  break;
  case vtos: /* nothing to do */        break;
  default  : ShouldNotReachHere();
  }
}

// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ldr(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  str(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::load_float(Address src) {
  ldrs(v0, src);
}

void InterpreterMacroAssembler::load_double(Address src) {
  ldrd(v0, src);
}

void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  mov(r13, sp);
  // record last_sp
  str(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
}

// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbzw(rscratch1, run_compiled_code);
    ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));
    br(rscratch1);
    bind(run_compiled_code);
  }

  ldr(rscratch1, Address(method, Method::from_interpreted_offset()));
  br(rscratch1);
}

// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.  amd64 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
    dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop,
                                              bool generate_poll) {
  if (VerifyActivationFrameSize) {
    Unimplemented();
  }
  if (verifyoop) {
    verify_oop(r0, state);
  }

  Label safepoint;
  address* const safepoint_table = Interpreter::safept_table(state);
  bool needs_thread_local_poll = generate_poll && table != safepoint_table;

  if (needs_thread_local_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    ldr(rscratch2, Address(rthread, JavaThread::polling_word_offset()));
    tbnz(rscratch2, exact_log2(SafepointMechanism::poll_bit()), safepoint);
  }

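  // rscratch1 holds the next bytecode; dispatch table entries are 8-byte
  // pointers, hence the uxtw(3) scaling below.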
  if (table == Interpreter::dispatch_table(state)) {
    addw(rscratch2, rscratch1, Interpreter::distance_from_dispatch_table(state));
    ldr(rscratch2, Address(rdispatch, rscratch2, Address::uxtw(3)));
  } else {
    mov(rscratch2, (address)table);
    ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
  }
  br(rscratch2);

  if (needs_thread_local_poll) {
    bind(safepoint);
    lea(rscratch2, ExternalAddress((address)safepoint_table));
    ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
    br(rscratch2);
  }
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode
  ldrb(rscratch1, Address(pre(rbcp, step)));
  dispatch_base(state, Interpreter::dispatch_table(state), /*verifyoop*/true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  ldrb(rscratch1, Address(rbcp, 0));
  dispatch_base(state, table);
}

// remove activation
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: registers r3 and v0 may be in use for the
  // result check if this is a synchronized method
  Label unlocked, unlock, no_unlock;

  // The poll below is for the stack watermark barrier. It allows frames that
  // would normally not be safe to use to be fixed up lazily. A return into such
  // unprocessed territory of the stack will call InterpreterRuntime::at_unwind.
  Label slow_path;
  Label fast_path;
  safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
  br(Assembler::AL, fast_path);
  bind(slow_path);
  push(state);
  set_last_Java_frame(esp, rfp, (address)pc(), rscratch1);
  super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
  reset_last_Java_frame(true);
  pop(state);
  bind(fast_path);

  // get the value of _do_not_unlock_if_synchronized into r3
  const Address do_not_unlock_if_synchronized(rthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  ldrb(r3, do_not_unlock_if_synchronized);
  strb(zr, do_not_unlock_if_synchronized); // reset the flag

  // get method access flags
  ldr(r1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  ldr(r2, Address(r1, Method::access_flags_offset()));
  tbz(r2, exact_log2(JVM_ACC_SYNCHRONIZED), unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cbnz(r3, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rfp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1 so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(c_rarg1, monitor); // address of first monitor

  ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
  cbnz(r0, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception.  Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    b(unlocked);
  }

  bind(unlock);
  unlock_object(c_rarg1);
  pop(state);

  // Check for block-structured locking (i.e., that all locked
  // objects have been unlocked)
  bind(unlocked);

  // r0: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    ldr(c_rarg1, monitor_block_top); // points to current entry, starting
                                     // with top-most entry
    lea(r19, monitor_block_bot);  // points to word before bottom of
                                  // monitor block
    b(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                   throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.

      push(state);
      unlock_object(c_rarg1);
      pop(state);

      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }

      b(restart);
    }

    bind(loop);
    // check if current entry is used
    ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
    cbnz(rscratch1, exception);

    add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
    bind(entry);
    cmp(c_rarg1, r19); // check if bottom reached
    br(Assembler::NE, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender esp
  ldr(rscratch2,
      Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));

  if (StackReservedPages > 0) {
    // testing if reserved zone needs to be re-enabled
    Label no_reserved_zone_enabling;

    // look for an overflow into the stack reserved zone, i.e.
    // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
    ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    cmp(rscratch2, rscratch1);
    br(Assembler::LS, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }


  if (state == atos && InlineTypeReturnedAsFields) {
    Label skip;
    // Test if the return type is an inline type
    ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
    ldr(rscratch1, Address(rscratch1, Method::const_offset()));
    ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
    cmpw(rscratch1, (u1) T_INLINE_TYPE);
    br(Assembler::NE, skip);

    // We are returning an inline type, load its fields into registers
    // Load fields from a buffered value with an inline class specific handler

    load_klass(rscratch1 /*dst*/, r0 /*src*/);
    ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
    ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
    cbz(rscratch1, skip);

    blr(rscratch1);

    // call above kills sender esp in rscratch2. Reload it.
    ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
    bind(skip);
  }

  // restore sender esp
  mov(esp, rscratch2);
  // remove frame anchor
  leave();
  // If we're returning to interpreted code we will shortly be
  // adjusting SP to allow some space for ESP.  If we're returning to
  // compiled code the saved sender SP was saved in sender_sp, so this
  // restores it.
  andr(sp, esp, -16);
}

// Lock object
//
// Args:
//      c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::lock_object(Register lock_reg)
{
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg = r0;
    const Register tmp = c_rarg2;
    const Register obj_reg = c_rarg3; // Will contain the oop

    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg %c_rarg3
    ldr(obj_reg, Address(lock_reg, obj_offset));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp, obj_reg);
      ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
      tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
      br(Assembler::NE, slow_case);
    }

    // Load (object->mark() | 1) into swap_reg
    ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    orr(swap_reg, rscratch1, 1);
    if (EnableValhalla) {
      // Mask inline_type bit such that we go to the slow path if object is an inline type
      andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
    }

    // Save (object->mark() | 1) into BasicLock's displaced header
    str(swap_reg, Address(lock_reg, mark_offset));

    assert(lock_offset == 0,
           "displaced header must be first word in BasicObjectLock");

    Label fail;
    cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);

    // Fast check for recursive lock.
    //
    // Can apply the optimization only if this is a stack lock
    // allocated in this thread. For efficiency, we can focus on
    // recently allocated stack locks (instead of reading the stack
    // base and checking whether 'mark' points inside the current
    // thread stack):
    //  1) (mark & 7) == 0, and
    //  2) sp <= mark < mark + os::pagesize()
    //
    // Warning: sp + os::pagesize can overflow the stack base. We must
    // neither apply the optimization for an inflated lock allocated
    // just above the thread stack (this is why condition 1 matters)
    // nor apply the optimization if the stack lock is inside the stack
    // of another thread. The latter is avoided even in case of overflow
    // because we have guard pages at the end of all stacks. Hence, if
    // we go over the stack base and hit the stack of another thread,
    // this should not be in a writeable area that could contain a
    // stack lock allocated by that thread. As a consequence, a stack
    // lock less than page size away from sp is guaranteed to be
    // owned by the current thread.
    //
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - sp) & (7 - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant 3 bits clear.
    // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
    // NOTE2: aarch64 does not like to subtract sp from rn so take a
    // copy
    mov(rscratch1, sp);
    sub(swap_reg, swap_reg, rscratch1);
    ands(swap_reg, swap_reg, (uint64_t)(7 - os::vm_page_size()));

    // Save the test result, for recursive case, the result is zero
    str(swap_reg, Address(lock_reg, mark_offset));
    br(Assembler::EQ, done);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);

    bind(done);
  }
}


// Unlocks an object. Used in monitorexit bytecode and
// remove_activation. Throws an IllegalMonitorStateException if the object is
// not locked by the current thread.
//
// Args:
//      c_rarg1: BasicObjectLock for lock
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::unlock_object(Register lock_reg)
{
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Label done;

    const Register swap_reg   = r0;
    const Register header_reg = c_rarg2;  // Will contain the old oopMark
    const Register obj_reg    = c_rarg3;  // Will contain the oop

    save_bcp(); // Save in case of exception

    // Convert from BasicObjectLock structure to object and BasicLock
    // structure. Store the BasicLock address into %r0
    lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));

    // Load oop into obj_reg(%c_rarg3)
    ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

    // Free entry
    str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

    // Load the old header from BasicLock structure
    ldr(header_reg, Address(swap_reg,
                            BasicLock::displaced_header_offset_in_bytes()));

    // Test for recursion
    cbz(header_reg, done);

    // Atomic swap back the old header
    cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);

    // Call the runtime routine for slow case.
    str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);

    restore_bcp();
  }
}

void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
  cbz(mdp, zero_continue);
}

// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;
  stp(r0, r1, Address(pre(sp, -2 * wordSize)));

  // Test MDO to avoid the call if it is NULL.
  ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));
  cbz(r0, set_mdp);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rmethod, rbcp);
  // r0: mdi
  // mdo is guaranteed to be non-zero here, we checked for it before the call.
  ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
  lea(r1, Address(r1, in_bytes(MethodData::data_offset())));
  add(r0, r1, r0);
  str(r0, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
  bind(set_mdp);
  ldp(r0, r1, Address(post(sp, 2 * wordSize)));
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  stp(r0, r1, Address(pre(sp, -2 * wordSize)));
  stp(r2, r3, Address(pre(sp, -2 * wordSize)));
  test_method_data_pointer(r3, verify_continue); // If mdp is zero, continue
  get_method(r1);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  ldrsh(r2, Address(r3, in_bytes(DataLayout::bci_offset())));
  ldr(rscratch1, Address(r1, Method::const_offset()));
  add(r2, r2, rscratch1, Assembler::LSL);
  lea(r2, Address(r2, ConstMethod::codes_offset()));
  cmp(r2, rbcp);
  br(Assembler::EQ, verify_continue);
  // r1: method
  // rbcp: bcp (rbcp is r22)
  // r3: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               r1, rbcp, r3);
  bind(verify_continue);
  ldp(r2, r3, Address(post(sp, 2 * wordSize)));
  ldp(r0, r1, Address(post(sp, 2 * wordSize)));
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  str(value, data);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  increment_mdp_data_at(mdp_in, noreg, constant, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64-bit counters; at best it is wasting space,
  // at worst it is a rare bug when counters overflow

  assert_different_registers(rscratch2, rscratch1, mdp_in, reg);

  Address addr1(mdp_in, constant);
  Address addr2(rscratch2, reg, Address::lsl(0));
  Address &addr = addr1;
  if (reg != noreg) {
    lea(rscratch2, addr1);
    addr = addr2;
  }

  if (decrement) {
    // Decrement the register.  Set condition codes.
    // Intel does this
    // addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    // Label L;
    // jcc(Assembler::negative, L);
    // addptr(data, (int32_t) DataLayout::counter_increment);
    // so we do this
    ldr(rscratch1, addr);
    subs(rscratch1, rscratch1, (unsigned)DataLayout::counter_increment);
    Label L;
    br(Assembler::LO, L);       // skip store if counter underflow
    str(rscratch1, addr);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Intel does this
    // Increment the register.  Set carry flag.
    // addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    // sbbptr(data, (int32_t)0);
    // so we do this
    ldr(rscratch1, addr);
    adds(rscratch1, rscratch1, DataLayout::counter_increment);
    Label L;
    br(Assembler::CS, L);       // skip store if counter overflow
    str(rscratch1, addr);
    bind(L);
  }
}

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int flags_offset = in_bytes(DataLayout::flags_offset());
  // Set the flag
  ldrb(rscratch1, Address(mdp_in, flags_offset));
  orr(rscratch1, rscratch1, flag_byte_constant);
  strb(rscratch1, Address(mdp_in, flags_offset));
}


void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    ldr(rscratch1, Address(mdp_in, offset));
    cmp(value, rscratch1);
  } else {
    // Put the test value into a register, so caller can use it:
    ldr(test_value_out, Address(mdp_in, offset));
    cmp(value, test_value_out);
  }
  br(Assembler::NE, not_equal_continue);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ldr(rscratch1, Address(mdp_in, offset_of_disp));
  add(mdp_in, mdp_in, rscratch1, LSL);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  lea(rscratch1, Address(mdp_in, offset_of_disp));
  ldr(rscratch1, Address(rscratch1, reg, Address::lsl(0)));
  add(mdp_in, mdp_in, rscratch1, LSL);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(mdp_in, mdp_in, (unsigned)constant);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // save/restore across call_VM
  stp(zr, return_bci, Address(pre(sp, -2 * wordSize)));
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  ldp(zr, return_bci, Address(post(sp, 2 * wordSize)));
}


void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    ldr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
            "flow-free idiom only works with 1");
    // Intel does this to catch overflow
    // addptr(bumped_count, DataLayout::counter_increment);
    // sbbptr(bumped_count, 0);
    // so we do this
    adds(bumped_count, bumped_count, DataLayout::counter_increment);
    Label L;
    br(Assembler::CS, L);       // skip store if counter overflow
    str(bumped_count, data);
    bind(L);
    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      cbnz(receiver, not_null);
      // We are making a call.  Increment the count for null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      b(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows.  At the same time, it remembers
// the location of the first empty row.  (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree.  Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      increment_mdp_data_at(mdp, in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()));
    }
#endif // INCLUDE_JVMCI
  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
      non_profiled_offset = in_bytes(CounterData::count_offset());
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
    }
#endif // INCLUDE_JVMCI

    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
        &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
                                        Register reg2, int start_row, Label& done, int total_rows,
                                        OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
                                        int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(mdp, item_offset, item,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the item from the CallData.)

    // The item is item[n].  Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(mdp, count_offset);
    b(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on item[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
          cbz(reg2, found_null);
          // Item did not match any saved item and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, non_profiled_offset);
          b(done);
          bind(found_null);
        } else {
          cbnz(reg2, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      cbz(reg2, found_null);

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
        item_offset_fn, item_count_offset_fn, non_profiled_offset);

      // Found a null.  Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

1376   // In the fall-through case, we found no matching item, but we
1377 // observed that item[start_row] is NULL.
1378 
1379   // Fill in the item field and increment the count.
1380   int item_offset = in_bytes(item_offset_fn(start_row));
1381   set_mdp_data_at(mdp, item_offset, item);
1382   int count_offset = in_bytes(item_count_offset_fn(start_row));
1383   mov(reg2, DataLayout::counter_increment);
1384   set_mdp_data_at(mdp, count_offset, reg2);
1385   if (start_row > 0) {
1386     b(done);
1387   }
1388 }
1389 
1390 // Example state machine code for three profile rows:
1391 //   // main copy of decision tree, rooted at row[0]
1392 //   if (row[0].rec == rec) { row[0].incr(); goto done; }
1393 //   if (row[0].rec != NULL) {
1394 //     // inner copy of decision tree, rooted at row[1]
1395 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1396 //     if (row[1].rec != NULL) {
1397 //       // degenerate decision tree, rooted at row[2]
1398 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1399 //       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
1400 //       row[2].init(rec); goto done;
1401 //     } else {
1402 //       // remember row[1] is empty
1403 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1404 //       row[1].init(rec); goto done;
1405 //     }
1406 //   } else {
1407 //     // remember row[0] is empty
1408 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1409 //     if (row[2].rec == rec) { row[2].incr(); goto done; }
1410 //     row[0].init(rec); goto done;
1411 //   }
1412 //   done:
1413 
1414 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1415                                                         Register mdp, Register reg2,
1416                                                         bool is_virtual_call) {
1417   assert(ProfileInterpreter, "must be profiling");
1418   Label done;
1419 
1420   record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
1421 
1422   bind(done);
1423 }
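
// A rough sketch of the ReceiverTypeData cells that the code above fills in,
// assuming TypeProfileWidth == 2 (illustrative only; methodData.hpp holds the
// authoritative layout):
//   [ count       ]  <- CounterData::count_offset(): calls the rows can't profile
//   [ receiver[0] ]  <- VirtualCallData::receiver_offset(0)
//   [ count[0]    ]  <- VirtualCallData::receiver_count_offset(0)
//   [ receiver[1] ]  <- VirtualCallData::receiver_offset(1)
//   [ count[1]    ]  <- VirtualCallData::receiver_count_offset(1)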
1424 
1425 void InterpreterMacroAssembler::profile_ret(Register return_bci,
1426                                             Register mdp) {
1427   if (ProfileInterpreter) {
1428     Label profile_continue;
1429     uint row;
1430 
1431     // If no method data exists, go to profile_continue.
1432     test_method_data_pointer(mdp, profile_continue);
1433 
1434     // Update the total ret count.
1435     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1436 
1437     for (row = 0; row < RetData::row_limit(); row++) {
1438       Label next_test;
1439 
1440       // See if return_bci is equal to bci[n]:
1441       test_mdp_data_at(mdp,
1442                        in_bytes(RetData::bci_offset(row)),
1443                        return_bci, noreg,
1444                        next_test);
1445 
1446       // return_bci is equal to bci[n].  Increment the count.
1447       increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));
1448 
1449       // The method data pointer needs to be updated to reflect the new target.
1450       update_mdp_by_offset(mdp,
1451                            in_bytes(RetData::bci_displacement_offset(row)));
1452       b(profile_continue);
1453       bind(next_test);
1454     }
1455 
1456     update_mdp_for_ret(return_bci);
1457 
1458     bind(profile_continue);
1459   }
1460 }
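
// Roughly, the code generated by profile_ret above behaves like this
// pseudocode (a sketch, in the style of the row[] example earlier):
//   count.incr();                                // total ret count
//   for (row = 0; row < RetData::row_limit(); row++) {
//     if (bci[row] == return_bci) {
//       bci_count[row].incr();
//       mdp += bci_displacement[row];            // step mdp to the target's data
//       goto done;
//     }
//   }
//   update_mdp_for_ret(return_bci);              // out-of-line: no row matched
//   done: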
1461 
1462 void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
1463   if (ProfileInterpreter) {
1464     Label profile_continue;
1465 
1466     // If no method data exists, go to profile_continue.
1467     test_method_data_pointer(mdp, profile_continue);
1468 
1469     set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
1470 
1471     // The method data pointer needs to be updated.
1472     int mdp_delta = in_bytes(BitData::bit_data_size());
1473     if (TypeProfileCasts) {
1474       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1475     }
1476     update_mdp_by_constant(mdp, mdp_delta);
1477 
1478     bind(profile_continue);
1479   }
1480 }
1481 
1482 void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
1483   if (ProfileInterpreter && TypeProfileCasts) {
1484     Label profile_continue;
1485 
1486     // If no method data exists, go to profile_continue.
1487     test_method_data_pointer(mdp, profile_continue);
1488 
1489     int count_offset = in_bytes(CounterData::count_offset());
1490     // Back up the address, since we have already bumped the mdp.
1491     count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
1492 
1493     // *Decrement* the counter.  We expect to see zero or small negatives.
1494     increment_mdp_data_at(mdp, count_offset, true);
1495 
1496     bind(profile_continue);
1497   }
1498 }
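
// Net effect, as a sketch: when the failure is recorded, mdp has already been
// advanced past the cast's VirtualCallData (by profile_typecheck below), so
// the counter is reached at a negative offset:
//   ((CounterData*)(mdp - virtual_call_data_size()))->count -= counter_increment;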
1499 
1500 void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
1501   if (ProfileInterpreter) {
1502     Label profile_continue;
1503 
1504     // If no method data exists, go to profile_continue.
1505     test_method_data_pointer(mdp, profile_continue);
1506 
1507     // The method data pointer needs to be updated.
1508     int mdp_delta = in_bytes(BitData::bit_data_size());
1509     if (TypeProfileCasts) {
1510       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1511 
1512       // Record the object type.
1513       record_klass_in_profile(klass, mdp, reg2, false);
1514     }
1515     update_mdp_by_constant(mdp, mdp_delta);
1516 
1517     bind(profile_continue);
1518   }
1519 }
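
// The two layouts handled above, sketched: without TypeProfileCasts a
// checkcast/instanceof site carries only a BitData, so mdp advances by
// bit_data_size(); with TypeProfileCasts it also carries receiver rows like a
// virtual call, so the klass is recorded and mdp advances by
// virtual_call_data_size().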
1520 
1521 void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
1522   if (ProfileInterpreter) {
1523     Label profile_continue;
1524 
1525     // If no method data exists, go to profile_continue.
1526     test_method_data_pointer(mdp, profile_continue);
1527 
1528     // Update the default case count
1529     increment_mdp_data_at(mdp,
1530                           in_bytes(MultiBranchData::default_count_offset()));
1531 
1532     // The method data pointer needs to be updated.
1533     update_mdp_by_offset(mdp,
1534                          in_bytes(MultiBranchData::
1535                                   default_displacement_offset()));
1536 
1537     bind(profile_continue);
1538   }
1539 }
1540 
1541 void InterpreterMacroAssembler::profile_switch_case(Register index,
1542                                                     Register mdp,
1543                                                     Register reg2) {
1544   if (ProfileInterpreter) {
1545     Label profile_continue;
1546 
1547     // If no method data exists, go to profile_continue.
1548     test_method_data_pointer(mdp, profile_continue);
1549 
1550     // Build the base (index * per_case_size_in_bytes()) +
1551     // case_array_offset_in_bytes()
1552     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1553     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1554     Assembler::maddw(index, index, reg2, rscratch1);
1555 
1556     // Update the case count
1557     increment_mdp_data_at(mdp,
1558                           index,
1559                           in_bytes(MultiBranchData::relative_count_offset()));
1560 
1561     // The method data pointer needs to be updated.
1562     update_mdp_by_offset(mdp,
1563                          index,
1564                          in_bytes(MultiBranchData::
1565                                   relative_displacement_offset()));
1566 
1567     bind(profile_continue);
1568   }
1569 }
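
// A sketch of the address arithmetic performed by maddw above (MultiBranchData
// keeps a relative count and a relative displacement per case):
//   index = index * per_case_size() + case_array_offset();
// index then holds the byte offset from mdp to this case's cells, and the
// relative count/displacement offsets are applied on top of it.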
1570 
1571 void InterpreterMacroAssembler::profile_array(Register mdp,
1572                                               Register array,
1573                                               Register tmp) {
1574   if (ProfileInterpreter) {
1575     Label profile_continue;
1576 
1577     // If no method data exists, go to profile_continue.
1578     test_method_data_pointer(mdp, profile_continue);
1579 
1580     mov(tmp, array);
1581     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::array_offset())));
1582 
1583     Label not_flat;
1584     test_non_flattened_array_oop(array, tmp, not_flat);
1585 
1586     set_mdp_flag_at(mdp, ArrayLoadStoreData::flat_array_byte_constant());
1587 
1588     bind(not_flat);
1589 
1590     Label not_null_free;
1591     test_non_null_free_array_oop(array, tmp, not_null_free);
1592 
1593     set_mdp_flag_at(mdp, ArrayLoadStoreData::null_free_array_byte_constant());
1594 
1595     bind(not_null_free);
1596 
1597     bind(profile_continue);
1598   }
1599 }
1600 
1601 void InterpreterMacroAssembler::profile_element(Register mdp,
1602                                                 Register element,
1603                                                 Register tmp) {
1604   if (ProfileInterpreter) {
1605     Label profile_continue;
1606 
1607     // If no method data exists, go to profile_continue.
1608     test_method_data_pointer(mdp, profile_continue);
1609 
1610     mov(tmp, element);
1611     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::element_offset())));
1612 
1613     // The method data pointer needs to be updated.
1614     update_mdp_by_constant(mdp, in_bytes(ArrayLoadStoreData::array_load_store_data_size()));
1615 
1616     bind(profile_continue);
1617   }
1618 }
1619 
1620 void InterpreterMacroAssembler::profile_acmp(Register mdp,
1621                                              Register left,
1622                                              Register right,
1623                                              Register tmp) {
1624   if (ProfileInterpreter) {
1625     Label profile_continue;
1626 
1627     // If no method data exists, go to profile_continue.
1628     test_method_data_pointer(mdp, profile_continue);
1629 
1630     mov(tmp, left);
1631     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
1632 
1633     Label left_not_inline_type;
1634     test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
1635     set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
1636     bind(left_not_inline_type);
1637 
1638     mov(tmp, right);
1639     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
1640 
1641     Label right_not_inline_type;
1642     test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
1643     set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
1644     bind(right_not_inline_type);
1645 
1646     bind(profile_continue);
1647   }
1648 }
1649 
1650 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
1651   if (state == atos) {
1652     MacroAssembler::verify_oop(reg);
1653   }
1654 }
1655 
1656 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1657 
1658 
1659 void InterpreterMacroAssembler::notify_method_entry() {
1660   // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
1661   // track stack depth.  If it is possible to enter interp_only_mode we add
1662   // the code to check if the event should be sent.
1663   if (JvmtiExport::can_post_interpreter_events()) {
1664     Label L;
1665     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1666     cbzw(r3, L);
1667     call_VM(noreg, CAST_FROM_FN_PTR(address,
1668                                     InterpreterRuntime::post_method_entry));
1669     bind(L);
1670   }
1671 
1672   {
1673     SkipIfEqual skip(this, &DTraceMethodProbes, false);
1674     get_method(c_rarg1);
1675     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1676                  rthread, c_rarg1);
1677   }
1678 
1679   // RedefineClasses() tracing support for obsolete method entry
1680   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1681     get_method(c_rarg1);
1682     call_VM_leaf(
1683       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1684       rthread, c_rarg1);
1685   }
1686 
1687 }
1688 
1689 
1690 void InterpreterMacroAssembler::notify_method_exit(
1691     TosState state, NotifyMethodExitMode mode) {
1692   // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
1693   // track stack depth.  If it is possible to enter interp_only_mode we add
1694   // the code to check if the event should be sent.
1695   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
1696     Label L;
1697     // Note: frame::interpreter_frame_result has a dependency on how the
1698     // method result is saved across the call to post_method_exit. If this
1699     // is changed then the interpreter_frame_result implementation will
1700     // need to be updated too.
1701 
1702     // The template interpreter will leave the result on the top of the stack.
1703     push(state);
1704     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1705     cbz(r3, L);
1706     call_VM(noreg,
1707             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
1708     bind(L);
1709     pop(state);
1710   }
1711 
1712   {
1713     SkipIfEqual skip(this, &DTraceMethodProbes, false);
1714     push(state);
1715     get_method(c_rarg1);
1716     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1717                  rthread, c_rarg1);
1718     pop(state);
1719   }
1720 }
1721 
1722 
1723 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
1724 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
1725                                                         int increment, Address mask,
1726                                                         Register scratch, Register scratch2,
1727                                                         bool preloaded, Condition cond,
1728                                                         Label* where) {
1729   if (!preloaded) {
1730     ldrw(scratch, counter_addr);
1731   }
1732   add(scratch, scratch, increment);
1733   strw(scratch, counter_addr);
1734   ldrw(scratch2, mask);
1735   ands(scratch, scratch, scratch2);
1736   br(cond, *where);
1737 }
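
// In pseudocode, a sketch of the snippet generated above:
//   int c = preloaded ? scratch : *counter_addr;
//   c += increment;
//   *counter_addr = c;
//   if ((c & *mask) <cond> 0) goto *where;   // e.g. EQ: all masked bits clear
// The interpreter uses this shape for invocation/backedge counters, where the
// mask throttles how often the overflow path is taken.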
1738 
1739 void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
1740                                                   int number_of_arguments) {
1741   // interpreter specific
1742   //
1743   // Note: No need to save/restore rbcp & rlocals pointer since these
1744   //       are callee saved registers and no blocking/GC can happen
1745   //       in leaf calls.
1746 #ifdef ASSERT
1747   {
1748     Label L;
1749     ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1750     cbz(rscratch1, L);
1751     stop("InterpreterMacroAssembler::call_VM_leaf_base:"
1752          " last_sp != NULL");
1753     bind(L);
1754   }
1755 #endif /* ASSERT */
1756   // super call
1757   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
1758 }
1759 
1760 void InterpreterMacroAssembler::call_VM_base(Register oop_result,
1761                                              Register java_thread,
1762                                              Register last_java_sp,
1763                                              address  entry_point,
1764                                              int      number_of_arguments,
1765                                              bool     check_exceptions) {
1766   // interpreter specific
1767   //
1768   // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
1769   //       really make a difference for these runtime calls, since they are
1770   //       slow anyway. Btw., bcp must be saved/restored since it may change
1771   //       due to GC.
1772   // assert(java_thread == noreg , "not expecting a precomputed java thread");
1773   save_bcp();
1774 #ifdef ASSERT
1775   {
1776     Label L;
1777     ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1778     cbz(rscratch1, L);
1779     stop("InterpreterMacroAssembler::call_VM_base:"
1780          " last_sp != NULL");
1781     bind(L);
1782   }
1783 #endif /* ASSERT */
1784   // super call
1785   MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
1786                                entry_point, number_of_arguments,
1787                                check_exceptions);
1788   // interpreter specific
1789   restore_bcp();
1790   restore_locals();
1791 }
1792 
1793 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
1794   assert_different_registers(obj, rscratch1);
1795   Label update, next, none;
1796 
1797   verify_oop(obj);
1798 
1799   cbnz(obj, update);
1800   orptr(mdo_addr, TypeEntries::null_seen);
1801   b(next);
1802 
1803   bind(update);
1804   load_klass(obj, obj);
1805 
1806   ldr(rscratch1, mdo_addr);
1807   eor(obj, obj, rscratch1);
1808   tst(obj, TypeEntries::type_klass_mask);
1809   br(Assembler::EQ, next); // klass seen before, nothing to
1810                            // do. The unknown bit may have been
1811                            // set already but no need to check.
1812 
1813   tbnz(obj, exact_log2(TypeEntries::type_unknown), next);
1814   // already unknown. Nothing to do anymore.
1815 
1816   ldr(rscratch1, mdo_addr);
1817   cbz(rscratch1, none);
1818   cmp(rscratch1, (u1)TypeEntries::null_seen);
1819   br(Assembler::EQ, none);
1820   // There is a chance that the checks above (re-reading profiling
1821   // data from memory) fail if another thread has just set the
1822   // profiled type to this obj's klass
1823   ldr(rscratch1, mdo_addr);
1824   eor(obj, obj, rscratch1);
1825   tst(obj, TypeEntries::type_klass_mask);
1826   br(Assembler::EQ, next);
1827 
1828   // different than before. Cannot keep accurate profile.
1829   orptr(mdo_addr, TypeEntries::type_unknown);
1830   b(next);
1831 
1832   bind(none);
1833   // first time here. Set profile type.
1834   str(obj, mdo_addr);
1835 
1836   bind(next);
1837 }
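
// A sketch of the lock-free update above, assuming the TypeEntries encoding
// (flag bits live in the low bits of the klass word stored in the cell):
//   if (obj == NULL)                             { cell |= null_seen; }
//   else if (((cell ^ klass) & klass_mask) == 0) { }                        // same klass
//   else if (cell & type_unknown)                { }                        // already polymorphic
//   else if (cell == 0 || cell == null_seen)     { cell = klass; }          // first klass seen
//   else                                         { cell |= type_unknown; }  // conflict
// A racing thread may publish a klass between the re-reads, which is why the
// cell is compared once more before type_unknown is set.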
1838 
1839 void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
1840   if (!ProfileInterpreter) {
1841     return;
1842   }
1843 
1844   if (MethodData::profile_arguments() || MethodData::profile_return()) {
1845     Label profile_continue;
1846 
1847     test_method_data_pointer(mdp, profile_continue);
1848 
1849     int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
1850 
1851     ldrb(rscratch1, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
1852     cmp(rscratch1, u1(is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag));
1853     br(Assembler::NE, profile_continue);
1854 
1855     if (MethodData::profile_arguments()) {
1856       Label done;
1857       int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
1858 
1859       for (int i = 0; i < TypeProfileArgsLimit; i++) {
1860         if (i > 0 || MethodData::profile_return()) {
1861           // If the return value type is profiled, we may have no argument to profile
1862           ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1863           sub(tmp, tmp, i*TypeStackSlotEntries::per_arg_count());
1864           cmp(tmp, (u1)TypeStackSlotEntries::per_arg_count());
1865           add(rscratch1, mdp, off_to_args);
1866           br(Assembler::LT, done);
1867         }
1868         ldr(tmp, Address(callee, Method::const_offset()));
1869         load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
1870         // stack offset o (zero based) from the start of the argument
1871         // list, for n arguments translates into offset n - o - 1 from
1872         // the end of the argument list
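        // (e.g. with n == 3 arguments, the entry recorded at stack offset
        // o == 0 maps to slot 3 - 0 - 1 == 2 from the end of the list)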
1873         ldr(rscratch1, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))));
1874         sub(tmp, tmp, rscratch1);
1875         sub(tmp, tmp, 1);
1876         Address arg_addr = argument_address(tmp);
1877         ldr(tmp, arg_addr);
1878 
1879         Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i)));
1880         profile_obj_type(tmp, mdo_arg_addr);
1881 
1882         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1883         off_to_args += to_add;
1884       }
1885 
1886       if (MethodData::profile_return()) {
1887         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1888         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1889       }
1890 
1891       add(rscratch1, mdp, off_to_args);
1892       bind(done);
1893       mov(mdp, rscratch1);
1894 
1895       if (MethodData::profile_return()) {
1896         // We're right after the type profile for the last
1897         // argument. tmp is the number of cells left in the
1898         // CallTypeData/VirtualCallTypeData to reach its end. Non-null
1899         // if there's a return to profile.
1900         assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1901         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1902       }
1903       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1904     } else {
1905       assert(MethodData::profile_return(), "either profile call args or call ret");
1906       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1907     }
1908 
1909     // mdp points right after the end of the
1910     // CallTypeData/VirtualCallTypeData, right after the cells for the
1911     // return value type if there's one
1912 
1913     bind(profile_continue);
1914   }
1915 }
1916 
1917 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1918   assert_different_registers(mdp, ret, tmp, rbcp);
1919   if (ProfileInterpreter && MethodData::profile_return()) {
1920     Label profile_continue, done;
1921 
1922     test_method_data_pointer(mdp, profile_continue);
1923 
1924     if (MethodData::profile_return_jsr292_only()) {
1925       assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
1926 
1927       // If we don't profile all invoke bytecodes we must make sure
1928       // it's a bytecode we indeed profile. We can't go back to the
1929       // beginning of the ProfileData we intend to update to check its
1930       // type because we're right after it and we don't know its
1931       // length.
1932       Label do_profile;
1933       ldrb(rscratch1, Address(rbcp, 0));
1934       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1935       br(Assembler::EQ, do_profile);
1936       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1937       br(Assembler::EQ, do_profile);
1938       get_method(tmp);
1939       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
1940       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1941       br(Assembler::NE, profile_continue);
1942 
1943       bind(do_profile);
1944     }
1945 
1946     Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
1947     mov(tmp, ret);
1948     profile_obj_type(tmp, mdo_ret_addr);
1949 
1950     bind(profile_continue);
1951   }
1952 }
1953 
1954 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1955   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1956   if (ProfileInterpreter && MethodData::profile_parameters()) {
1957     Label profile_continue, done;
1958 
1959     test_method_data_pointer(mdp, profile_continue);
1960 
1961     // Load the offset of the area within the MDO used for
1962     // parameters. If it's negative, we're not profiling any parameters.
1963     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1964     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1965 
1966     // Compute a pointer to the area for parameters from the offset
1967     // and move the pointer to the slot for the last
1968     // parameters. Collect profiling from last parameter down.
1969     // mdo start + parameters offset + array length - 1
1970     add(mdp, mdp, tmp1);
1971     ldr(tmp1, Address(mdp, ArrayData::array_len_offset()));
1972     sub(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());
1973 
1974     Label loop;
1975     bind(loop);
1976 
1977     int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
1978     int type_base = in_bytes(ParametersTypeData::type_offset(0));
1979     int per_arg_scale = exact_log2(DataLayout::cell_size);
1980     add(rscratch1, mdp, off_base);
1981     add(rscratch2, mdp, type_base);
1982 
1983     Address arg_off(rscratch1, tmp1, Address::lsl(per_arg_scale));
1984     Address arg_type(rscratch2, tmp1, Address::lsl(per_arg_scale));
1985 
1986     // load offset on the stack from the slot for this parameter
1987     ldr(tmp2, arg_off);
1988     neg(tmp2, tmp2);
1989     // read the parameter from the local area
1990     ldr(tmp2, Address(rlocals, tmp2, Address::lsl(Interpreter::logStackElementSize)));
1991 
1992     // profile the parameter
1993     profile_obj_type(tmp2, arg_type);
1994 
1995     // go to next parameter
1996     subs(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());
1997     br(Assembler::GE, loop);
1998 
1999     bind(profile_continue);
2000   }
2001 }
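
// A sketch of the loop generated above, in pseudocode (the index steps down by
// TypeStackSlotEntries::per_arg_count() from array_len - per_arg_count):
//   for (i = last_entry; i >= 0; i -= per_arg_count) {
//     obj = locals[-stack_slot[i]];     // parameters live in the local area
//     profile_obj_type(obj, type[i]);   // record the parameter's type
//   }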