/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interp_masm_riscv.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

void InterpreterMacroAssembler::narrow(Register result) {
  // Get method->_constMethod->_result_type
  ld(t0, Address(fp, frame::interpreter_frame_method_offset * wordSize));
  ld(t0, Address(t0, Method::const_offset()));
  lbu(t0, Address(t0, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  mv(t1, T_INT);
  beq(t0, t1, done);

  // mask integer result to narrower return type.
  mv(t1, T_BOOLEAN);
  bne(t0, t1, notBool);

  andi(result, result, 0x1);
  j(done);

  bind(notBool);
  mv(t1, T_BYTE);
  bne(t0, t1, notByte);
  sign_extend(result, result, 8);
  j(done);

  bind(notByte);
  mv(t1, T_CHAR);
  bne(t0, t1, notChar);
  zero_extend(result, result, 16);
  j(done);

  bind(notChar);
  sign_extend(result, result, 16);

  // Nothing to do for T_INT
  bind(done);
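  // Sign-extend the (possibly narrowed) 32-bit result into the full 64-bit register.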
  addw(result, result, zr);
}

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry != NULL, "Entry must have been generated by now");
  j(entry);
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed. If the flag has the popframe_processing bit set,
    // it means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // This method is only called just after the call into the vm in
    // call_VM_base, so the arg registers are available.
    lwu(t1, Address(xthread, JavaThread::popframe_condition_offset()));
    andi(t0, t1, JavaThread::popframe_pending_bit);
    beqz(t0, L);
    andi(t0, t1, JavaThread::popframe_processing_bit);
    bnez(t0, L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
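    // The entry address is returned in x10; jump to it.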
    jr(x10);
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  ld(x12, Address(xthread, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr(x12, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(x12, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(x12, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
    case atos:
      ld(x10, oop_addr);
      sd(zr, oop_addr);
      verify_oop(x10);
      break;
    case ltos:
      ld(x10, val_addr);
      break;
    case btos:  // fall through
    case ztos:  // fall through
    case ctos:  // fall through
    case stos:  // fall through
    case itos:
      lwu(x10, val_addr);
      break;
    case ftos:
      flw(f10, val_addr);
      break;
    case dtos:
      fld(f10, val_addr);
      break;
    case vtos:
      /* nothing to do */
      break;
    default:
      ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  mv(t0, (int)ilgl);
  sw(t0, tos_addr);
  sw(zr, val_addr);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
    beqz(t0, L);  // if [thread->jvmti_thread_state() == NULL] then exit

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    lwu(t0, Address(t0, JvmtiThreadState::earlyret_state_offset()));
    mv(t1, JvmtiThreadState::earlyret_pending);
    bne(t0, t1, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
    lwu(t0, Address(t0, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), t0);
    jr(x10);
    bind(L);
  }
}

void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  lhu(reg, Address(xbcp, bcp_offset));
  revb_h(reg, reg);
}

void InterpreterMacroAssembler::get_dispatch() {
  ExternalAddress target((address)Interpreter::dispatch_table());
  relocate(target.rspec(), [&] {
    int32_t offset;
    la_patchable(xdispatch, target, offset);
    addi(xdispatch, xdispatch, offset);
  });
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(xbcp, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    lwu(index, Address(xbcp, bcp_offset));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    xori(index, index, -1);
    addw(index, index, zr);
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(xbcp, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

// Return
// Rindex: index into constant pool
// Rcache: address of cache entry - ConstantPoolCache::base_offset()
//
// A caller must add ConstantPoolCache::base_offset() to Rcache to get
// the true address of the cache entry.
//
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                           Register index,
                                                           int bcp_offset,
                                                           size_t index_size) {
  assert_different_registers(cache, index);
  assert_different_registers(cache, xcpool);
  get_cache_index_at_bcp(index, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // Convert from field index to ConstantPoolCacheEntry
  // riscv already has the cache in xcpool so there is no need to
  // install it in cache. Instead we pre-add the indexed offset to
  // xcpool and return it in cache. All clients of this method need to
  // be modified accordingly.
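  // sizeof(ConstantPoolCacheEntry) is 4 words = 32 bytes, hence the shift of 5.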
  shadd(cache, index, xcpool, cache, 5);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register index,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  // We use a 32-bit load here since the layout of 64-bit words on
  // little-endian machines allows us to do that.
  // n.b. unlike x86, cache already includes the index offset
  la(bytecode, Address(cache,
                       ConstantPoolCache::base_offset() +
                       ConstantPoolCacheEntry::indices_offset()));
  membar(MacroAssembler::AnyAny);
  lwu(bytecode, bytecode);
  membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
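  // Extract the bytecode stored in byte (1 + byte_no) of the indices word:
  // shift it up to the most significant byte, then down to the least significant byte.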
  const int shift_count = (1 + byte_no) * BitsPerByte;
  slli(bytecode, bytecode, XLEN - (shift_count + BitsPerByte));
  srli(bytecode, bytecode, XLEN - BitsPerByte);
}

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  assert_different_registers(cache, tmp);
  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // Convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord,
         "else change next line");
  ld(cache, Address(fp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
  // construct pointer to cache entry
  shadd(cache, tmp, cache, tmp, 2 + LogBytesPerWord);
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // Load pointer for resolved_references[] objArray
  ld(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  ld(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  resolve_oop_handle(result, tmp, t1);
  // Add in the index
  addi(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
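  // The array header offset was pre-added in units of heap-oop size, so a
  // single shift scales both the index and the header offset to a byte address.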
  shadd(result, index, result, index, LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, 0), tmp, t1);
}

void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                Register cpool, Register index, Register klass, Register temp) {
  shadd(temp, index, cpool, temp, LogBytesPerWord);
  lhu(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ld(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
  shadd(klass, temp, klass, temp, LogBytesPerWord);
  ld(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}

void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
                                                              Register method,
                                                              Register cache) {
  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == TemplateTable::f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));

  ld(method, Address(cache, method_offset)); // get f1 Method*
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//      x10: superklass
//      Rsub_klass: subklass
//
// Kills:
//      x12, x15
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != x10, "x10 holds superklass");
  assert(Rsub_klass != x12, "x12 holds 2ndary super array length");
  assert(Rsub_klass != x15, "x15 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(x12, Rsub_klass, x15); // blows x12, reloads x15

  // Do the check.
  check_klass_subtype(Rsub_klass, x10, x12, ok_is_subtype); // blows x12

  // Profile the failure of the check.
  profile_typecheck_failed(x12); // blows x12
}

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  ld(r, Address(esp, 0));
  addi(esp, esp, wordSize);
}

void InterpreterMacroAssembler::pop_i(Register r) {
  lw(r, Address(esp, 0)); // lw sign-extends the loaded 32-bit value
  addi(esp, esp, wordSize);
}

void InterpreterMacroAssembler::pop_l(Register r) {
  ld(r, Address(esp, 0));
  addi(esp, esp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  addi(esp, esp, -wordSize);
  sd(r, Address(esp, 0));
}

void InterpreterMacroAssembler::push_i(Register r) {
  addi(esp, esp, -wordSize);
  addw(r, r, zr); // sign-extend the 32-bit value
  sd(r, Address(esp, 0));
}

void InterpreterMacroAssembler::push_l(Register r) {
  addi(esp, esp, -2 * wordSize);
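  // A long occupies two expression-stack slots; zero the unused slot.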
  sd(zr, Address(esp, wordSize));
  sd(r, Address(esp));
}

void InterpreterMacroAssembler::pop_f(FloatRegister r) {
  flw(r, Address(esp, 0));
  addi(esp, esp, wordSize);
}

void InterpreterMacroAssembler::pop_d(FloatRegister r) {
  fld(r, Address(esp, 0));
  addi(esp, esp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_f(FloatRegister r) {
  addi(esp, esp, -wordSize);
  fsw(r, Address(esp, 0));
}

void InterpreterMacroAssembler::push_d(FloatRegister r) {
  addi(esp, esp, -2 * wordSize);
  fsd(r, Address(esp, 0));
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos:
      pop_ptr();
      verify_oop(x10);
      break;
    case btos:  // fall through
    case ztos:  // fall through
    case ctos:  // fall through
    case stos:  // fall through
    case itos:
      pop_i();
      break;
    case ltos:
      pop_l();
      break;
    case ftos:
      pop_f();
      break;
    case dtos:
      pop_d();
      break;
    case vtos:
      /* nothing to do */
      break;
    default:
      ShouldNotReachHere();
  }
}

void InterpreterMacroAssembler::push(TosState state) {
  switch (state) {
    case atos:
      verify_oop(x10);
      push_ptr();
      break;
    case btos:  // fall through
    case ztos:  // fall through
    case ctos:  // fall through
    case stos:  // fall through
    case itos:
      push_i();
      break;
    case ltos:
      push_l();
      break;
    case ftos:
      push_f();
      break;
    case dtos:
      push_d();
      break;
    case vtos:
      /* nothing to do */
      break;
    default:
      ShouldNotReachHere();
  }
}

// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  sd(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::load_float(Address src) {
  flw(f10, src);
}

void InterpreterMacroAssembler::load_double(Address src) {
  fld(f10, src);
}

void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  mv(x19_sender_sp, sp);
  // record last_sp
  sd(esp, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
}

// Jump to the from_interpreted entry of a call unless single stepping is
// possible in this thread, in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
  prepare_to_jump_from_interpreted();
  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    lwu(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
    beqz(t0, run_compiled_code);
    ld(t0, Address(method, Method::interpreter_entry_offset()));
    jr(t0);
    bind(run_compiled_code);
  }

  ld(t0, Address(method, Method::from_interpreted_offset()));
  jr(t0);
}

// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.  amd64 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop,
                                              bool generate_poll,
                                              Register Rs) {
  // Note: the argument Rs defaults to t0 and holds the bytecode to dispatch on.
  if (VerifyActivationFrameSize) {
    Unimplemented();
  }
  if (verifyoop && state == atos) {
    verify_oop(x10);
  }

  Label safepoint;
  address* const safepoint_table = Interpreter::safept_table(state);
  bool needs_thread_local_poll = generate_poll && table != safepoint_table;

  if (needs_thread_local_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    ld(t1, Address(xthread, JavaThread::polling_word_offset()));
    andi(t1, t1, SafepointMechanism::poll_bit());
    bnez(t1, safepoint);
  }
  if (table == Interpreter::dispatch_table(state)) {
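    // The active table is addressed relative to the base table kept in
    // xdispatch: distance_from_dispatch_table(state) is measured in
    // word-sized entries, and the shift of 3 scales the sum to a byte offset.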
    mv(t1, Interpreter::distance_from_dispatch_table(state));
    add(t1, Rs, t1);
    shadd(t1, t1, xdispatch, t1, 3);
  } else {
    mv(t1, (address)table);
    shadd(t1, Rs, t1, Rs, 3);
  }
  ld(t1, Address(t1));
  jr(t1);

  if (needs_thread_local_poll) {
    bind(safepoint);
    la(t1, ExternalAddress((address)safepoint_table));
    shadd(t1, Rs, t1, Rs, 3);
    ld(t1, Address(t1));
    jr(t1);
  }
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll, Register Rs) {
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll, Rs);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state, Register Rs) {
  dispatch_base(state, Interpreter::normal_table(state), true, false, Rs);
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state, Register Rs) {
  dispatch_base(state, Interpreter::normal_table(state), false, false, Rs);
}

void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode
  load_unsigned_byte(t0, Address(xbcp, step));
  add(xbcp, xbcp, step);
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  lbu(t0, Address(xbcp, 0));
  dispatch_base(state, table);
}

// remove activation
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
                                TosState state,
                                bool throw_monitor_exception,
                                bool install_monitor_exception,
                                bool notify_jvmdi) {
  // Note: register x13 may be in use for the result check if this is
  // a synchronized method
  Label unlocked, unlock, no_unlock;

  // The poll below is for the stack watermark barrier. It allows frames to be
  // fixed up lazily; returning into a frame that has not yet been fixed up is
  // unsafe, so such returns take the slow path, which calls
  // InterpreterRuntime::at_unwind.
  Label slow_path;
  Label fast_path;
  safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
  j(fast_path);

  bind(slow_path);
  push(state);
  set_last_Java_frame(esp, fp, (address)pc(), t0);
  super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), xthread);
  reset_last_Java_frame(true);
  pop(state);

  bind(fast_path);

  // get the value of _do_not_unlock_if_synchronized into x13
  const Address do_not_unlock_if_synchronized(xthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  lbu(x13, do_not_unlock_if_synchronized);
  sb(zr, do_not_unlock_if_synchronized); // reset the flag

  // get method access flags
  ld(x11, Address(fp, frame::interpreter_frame_method_offset * wordSize));
  ld(x12, Address(x11, Method::access_flags_offset()));
  andi(t0, x12, JVM_ACC_SYNCHRONIZED);
  beqz(t0, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  bnez(x13, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, we need to check that the object
  // has not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(fp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1 so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  la(c_rarg1, monitor); // address of first monitor

  ld(x10, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
  bnez(x10, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception.  Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                                      InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    j(unlocked);
  }

  bind(unlock);
  unlock_object(c_rarg1);
  pop(state);

  // Check for block-structured locking (i.e., that all locked
  // objects have been unlocked)
  bind(unlocked);

  // x10: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(
      fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
      fp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    ld(c_rarg1, monitor_block_top); // points to current entry, starting
                                    // with top-most entry
    la(x9, monitor_block_bot);  // points to word before bottom of
                                // monitor block

    j(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                               throw_illegal_monitor_state_exception));

      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so we don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.

      push(state);
      unlock_object(c_rarg1);
      pop(state);

      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }

      j(restart);
    }

    bind(loop);
    // check if current entry is used
    add(t0, c_rarg1, BasicObjectLock::obj_offset_in_bytes());
    ld(t0, Address(t0, 0));
    bnez(t0, exception);

    add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
    bind(entry);
    bne(c_rarg1, x9, loop); // if bottom not yet reached, check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender esp
  ld(t1,
     Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize));
  if (StackReservedPages > 0) {
    // testing if reserved zone needs to be re-enabled
    Label no_reserved_zone_enabling;

    ld(t0, Address(xthread, JavaThread::reserved_stack_activation_offset()));
    ble(t1, t0, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), xthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }

  // restore sender esp
  mv(esp, t1);

  // remove frame anchor
  leave();
  // If we're returning to interpreted code we will shortly be
  // adjusting SP to allow some space for ESP.  If we're returning to
  // compiled code the saved sender SP was saved in sender_sp, so this
  // restores it.
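  // Keep sp 16-byte aligned, as required by the RISC-V psABI.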
  andi(sp, esp, -16);
}

// Lock object
//
// Args:
//      c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      x10
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
//      t0, t1 (temp regs)
void InterpreterMacroAssembler::lock_object(Register lock_reg)
{
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label count, done;

    const Register swap_reg = x10;
    const Register tmp = c_rarg2;
    const Register obj_reg = c_rarg3; // Will contain the oop

    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg c_rarg3
    ld(obj_reg, Address(lock_reg, obj_offset));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp, obj_reg);
      lwu(tmp, Address(tmp, Klass::access_flags_offset()));
      andi(tmp, tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
      bnez(tmp, slow_case);
    }

    if (UseFastLocking) {
      ld(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      fast_lock(obj_reg, tmp, t0, t1, slow_case);
      j(count);
    } else {
      // Load (object->mark() | 1) into swap_reg
      ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      ori(swap_reg, t0, 1);

      // Save (object->mark() | 1) into BasicLock's displaced header
      sd(swap_reg, Address(lock_reg, mark_offset));

      assert(lock_offset == 0,
             "displaced header must be first word in BasicObjectLock");

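      // Attempt to CAS the object's mark word from the unlocked value saved
      // above to the address of the BasicLock; on success we own the lock and
      // branch to count.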
      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, t0, count, /*fallthrough*/NULL);

      // Test if the oopMark is an obvious stack pointer, i.e.,
      //  1) (mark & 7) == 0, and
      //  2) sp <= mark < mark + os::pagesize()
      //
      // These tests can be done by evaluating the following
      // expression: ((mark - sp) & (7 - os::vm_page_size())),
      // assuming both the stack pointer and the page size have their
      // least significant 3 bits clear.
      // NOTE: the oopMark is in swap_reg x10 as the result of cmpxchg
      sub(swap_reg, swap_reg, sp);
      mv(t0, (int64_t)(7 - (int)os::vm_page_size()));
      andr(swap_reg, swap_reg, t0);

      // Save the test result; for the recursive case the result is zero
      sd(swap_reg, Address(lock_reg, mark_offset));
      beqz(swap_reg, count);
    }

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            UseFastLocking ? obj_reg : lock_reg);

    j(done);

    bind(count);
    increment(Address(xthread, JavaThread::held_monitor_count_offset()));

    bind(done);
  }
}


// Unlocks an object. Used in monitorexit bytecode and
// remove_activation.  Throws an IllegalMonitorException if object is
// not locked by current thread.
//
// Args:
//      c_rarg1: BasicObjectLock for lock
//
// Kills:
//      x10
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      t0, t1 (temp regs)
void InterpreterMacroAssembler::unlock_object(Register lock_reg)
{
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Label count, done;

    const Register swap_reg   = x10;
    const Register header_reg = c_rarg2;  // Will contain the old oopMark
    const Register obj_reg    = c_rarg3;  // Will contain the oop

    save_bcp(); // Save in case of exception

    if (UseFastLocking) {
      Label slow_case;
      // Load oop into obj_reg(c_rarg3)
      ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

      // Free entry
      sd(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

      // Check for non-symmetric locking. This is allowed by the spec and the interpreter
      // must handle it.
      Register tmp = header_reg;
      ld(tmp, Address(xthread, JavaThread::lock_stack_current_offset()));
      ld(tmp, Address(tmp, -oopSize));
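      // tmp should now hold the most recently pushed lock-stack entry; if it
      // is not the object being unlocked, locking was non-symmetric and we
      // must take the slow path.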
      bne(tmp, obj_reg, slow_case);

      ld(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      fast_unlock(obj_reg, header_reg, swap_reg, t0, slow_case);
      j(count);

      bind(slow_case);
    } else {
      // Convert from BasicObjectLock structure to object and BasicLock
      // structure. Store the BasicLock address into x10
      la(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));

      // Load oop into obj_reg(c_rarg3)
      ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

      // Free entry
      sd(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

      // Load the old header from BasicLock structure
      ld(header_reg, Address(swap_reg,
                             BasicLock::displaced_header_offset_in_bytes()));

      // Test for recursion
      beqz(header_reg, count);

      // Atomic swap back the old header
      cmpxchg_obj_header(swap_reg, header_reg, obj_reg, t0, count, /*fallthrough*/NULL);
    }

    // Call the runtime routine for slow case.
    sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    j(done);

    bind(count);
    decrement(Address(xthread, JavaThread::held_monitor_count_offset()));

    bind(done);

    restore_bcp();
  }
}


void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld(mdp, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
  beqz(mdp, zero_continue);
}

// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;
  push_reg(RegSet::of(x10, x11), sp); // save x10, x11

  // Test MDO to avoid the call if it is NULL.
  ld(x10, Address(xmethod, in_bytes(Method::method_data_offset())));
  beqz(x10, set_mdp);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), xmethod, xbcp);
  // x10: mdi
  // The MDO is guaranteed to be non-NULL here; we checked for that before the call.
  ld(x11, Address(xmethod, in_bytes(Method::method_data_offset())));
  la(x11, Address(x11, in_bytes(MethodData::data_offset())));
  add(x10, x11, x10);
  sd(x10, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
  bind(set_mdp);
  pop_reg(RegSet::of(x10, x11), sp);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  add(sp, sp, -4 * wordSize);
  sd(x10, Address(sp, 0));
  sd(x11, Address(sp, wordSize));
  sd(x12, Address(sp, 2 * wordSize));
  sd(x13, Address(sp, 3 * wordSize));
  test_method_data_pointer(x13, verify_continue); // If mdp is zero, continue
  get_method(x11);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  lh(x12, Address(x13, in_bytes(DataLayout::bci_offset())));
  ld(t0, Address(x11, Method::const_offset()));
  add(x12, x12, t0);
  la(x12, Address(x12, ConstMethod::codes_offset()));
  beq(x12, xbcp, verify_continue);
  // x11: method
  // xbcp: bcp (xbcp is register x22)
  // x13: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               x11, xbcp, x13);
  bind(verify_continue);
  ld(x10, Address(sp, 0));
  ld(x11, Address(sp, wordSize));
  ld(x12, Address(sp, 2 * wordSize));
  ld(x13, Address(sp, 3 * wordSize));
  add(sp, sp, 4 * wordSize);
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  sd(value, data);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  increment_mdp_data_at(mdp_in, noreg, constant, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% This uses 64-bit counters; at best it wastes space, and at
  // worst it is a rare bug when a counter overflows.

  assert_different_registers(t1, t0, mdp_in, reg);

  Address addr1(mdp_in, constant);
  Address addr2(t1, 0);
  Address &addr = addr1;
  if (reg != noreg) {
    la(t1, addr1);
    add(t1, t1, reg);
    addr = addr2;
  }

  if (decrement) {
    ld(t0, addr);
    addi(t0, t0, -DataLayout::counter_increment);
    Label L;
    bltz(t0, L);      // skip store if counter underflows
    sd(t0, addr);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    ld(t0, addr);
    addi(t0, t0, DataLayout::counter_increment);
    Label L;
    blez(t0, L);       // skip store if counter overflows
    sd(t0, addr);
    bind(L);
  }
}

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int flags_offset = in_bytes(DataLayout::flags_offset());
  // Set the flag
  lbu(t1, Address(mdp_in, flags_offset));
  ori(t1, t1, flag_byte_constant);
  sb(t1, Address(mdp_in, flags_offset));
}


void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    ld(t1, Address(mdp_in, offset));
    bne(value, t1, not_equal_continue);
  } else {
    // Put the test value into a register, so caller can use it:
    ld(test_value_out, Address(mdp_in, offset));
    bne(value, test_value_out, not_equal_continue);
  }
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld(t1, Address(mdp_in, offset_of_disp));
  add(mdp_in, mdp_in, t1);
  sd(mdp_in, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
}

void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(t1, mdp_in, reg);
  ld(t1, Address(t1, offset_of_disp));
  add(mdp_in, mdp_in, t1);
  sd(mdp_in, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addi(mdp_in, mdp_in, (unsigned)constant);
  sd(mdp_in, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // save/restore across call_VM
  addi(sp, sp, -2 * wordSize);
  sd(zr, Address(sp, 0));
  sd(return_bci, Address(sp, wordSize));
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  ld(zr, Address(sp, 0));
  ld(return_bci, Address(sp, wordSize));
  addi(sp, sp, 2 * wordSize);
}

void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the taken count.
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    ld(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    addi(bumped_count, bumped_count, DataLayout::counter_increment);
    Label L;
    // e.g. bumped_count = 0x7fff_ffff_ffff_ffff: adding 1 wraps to a
    // negative value, so we test <= 0 rather than == 0.
    blez(bumped_count, L);       // skip store if counter overflows
    sd(bumped_count, data);
    bind(L);
    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      bnez(receiver, not_null);
      // We are making a call.  Increment the count for the null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      j(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.

    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows.  At the same time, it remembers
// the location of the first empty row.  (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree.  Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                Register receiver, Register mdp,
                                Register reg2,
                                Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }

  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
      non_profiled_offset = in_bytes(CounterData::count_offset());
    }

    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
      &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(
  Register item, Register mdp, Register reg2, int start_row, Label& done, int total_rows,
  OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn, int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(mdp, item_offset, item,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the item from the CallData.)

    // The item is item[n].  Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(mdp, count_offset);
    j(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on item[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
          beqz(reg2, found_null);
          // Item did not match any saved item and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, non_profiled_offset);
          j(done);
          bind(found_null);
        } else {
          bnez(reg2, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      beqz(reg2, found_null);

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
        item_offset_fn, item_count_offset_fn, non_profiled_offset);

      // Found a null.  Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed that item[start_row] is NULL.
  // Fill in the item field and increment the count.
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(mdp, item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
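  // A freshly claimed row starts its count at DataLayout::counter_increment (1).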
1337   mv(reg2, DataLayout::counter_increment);
1338   set_mdp_data_at(mdp, count_offset, reg2);
1339   if (start_row > 0) {
1340     j(done);
1341   }
1342 }
1343 
1344 // Example state machine code for three profile rows:
1345 //   # main copy of decision tree, rooted at row[1]
1346 //   if (row[0].rec == rec) then [
1347 //     row[0].incr()
1348 //     goto done
1349 //   ]
1350 //   if (row[0].rec != NULL) then [
1351 //     # inner copy of decision tree, rooted at row[1]
1352 //     if (row[1].rec == rec) then [
1353 //       row[1].incr()
1354 //       goto done
1355 //     ]
1356 //     if (row[1].rec != NULL) then [
1357 //       # degenerate decision tree, rooted at row[2]
1358 //       if (row[2].rec == rec) then [
1359 //         row[2].incr()
1360 //         goto done
1361 //       ]
1362 //       if (row[2].rec != NULL) then [
1363 //         count.incr()
1364 //         goto done
1365 //       ] # overflow
1366 //       row[2].init(rec)
1367 //       goto done
1368 //     ] else [
1369 //       # remember row[1] is empty
1370 //       if (row[2].rec == rec) then [
1371 //         row[2].incr()
1372 //         goto done
1373 //       ]
1374 //       row[1].init(rec)
1375 //       goto done
1376 //     ]
1377 //   else [
1378 //     # remember row[0] is empty
1379 //     if (row[1].rec == rec) then [
1380 //       row[1].incr()
1381 //       goto done
1382 //     ]
1383 //     if (row[2].rec == rec) then [
1384 //       row[2].incr()
1385 //       goto done
1386 //     ]
1387 //     row[0].init(rec)
1388 //     goto done
1389 //   ]
1390 //   done:
1391 
1392 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1393                                                         Register mdp, Register reg2,
1394                                                         bool is_virtual_call) {
1395   assert(ProfileInterpreter, "must be profiling");
1396   Label done;
1397 
1398   record_klass_in_profile_helper(receiver, mdp, reg2, done, is_virtual_call);
1399 
1400   bind(done);
1401 }
1402 
void InterpreterMacroAssembler::profile_ret(Register return_bci, Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (uint row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      j(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}

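// Record that a null was seen at a type check by setting the null_seen
// flag in the current BitData cell, then advance mdp past the record.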
void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

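// A type check has failed: back up to the record that mdp has already
// been advanced past and decrement its counter (see the comments below).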
void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind(profile_continue);
  }
}

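// Profile a checkcast/instanceof: if TypeProfileCasts, record the
// object's klass in the receiver-type rows, then advance mdp past the
// data record.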
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2, false);
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

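// Count a hit of the default case of a tableswitch/lookupswitch and move
// mdp to the default target's displacement.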
void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::
                                  default_displacement_offset()));

    bind(profile_continue);
  }
}

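// Count a hit of switch case 'index' and move mdp to that case's
// displacement, indexing into the MultiBranchData case array.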
void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) +
    // case_array_offset_in_bytes()
    mv(reg2, in_bytes(MultiBranchData::per_case_size()));
    mv(t0, in_bytes(MultiBranchData::case_array_offset()));
    Assembler::mul(index, index, reg2);
    Assembler::add(index, index, t0);

    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit
  // events are sent for that thread to track stack depth.  If it is
  // possible to enter interp_only_mode we add the code to check if the
  // event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    lwu(x13, Address(xthread, JavaThread::interp_only_mode_offset()));
    beqz(x13, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 xthread, c_rarg1);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    get_method(c_rarg1);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      xthread, c_rarg1);
  }
}


void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit
  // events are sent for that thread to track stack depth.  If it is
  // possible to enter interp_only_mode we add the code to check if the
  // event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // The template interpreter will leave the result on the top of the stack.
    push(state);
    lwu(x13, Address(xthread, JavaThread::interp_only_mode_offset()));
    beqz(x13, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    pop(state);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    push(state);
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 xthread, c_rarg1);
    pop(state);
  }
}


// Jump to 'where' if ((*counter_addr += increment) & mask) == 0.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask,
                                                        Register tmp1, Register tmp2,
                                                        bool preloaded, Label* where) {
  Label done;
  if (!preloaded) {
    lwu(tmp1, counter_addr);
  }
  add(tmp1, tmp1, increment);
  sw(tmp1, counter_addr);
  lwu(tmp2, mask);
  andr(tmp1, tmp1, tmp2);
  bnez(tmp1, done);
  j(*where); // the offset may exceed beqz's +-4 KiB branch range, so use an unconditional jump
  bind(done);
}

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore the xbcp & xlocals pointers since these
  //       are callee-saved registers and no blocking or GC can happen
  //       in leaf calls.
#ifdef ASSERT
  {
    Label L;
    ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
    beqz(t0, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address  entry_point,
                                             int      number_of_arguments,
                                             bool     check_exceptions) {
  // interpreter specific
  //
  // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  //       really make a difference for these runtime calls, since they are
  //       slow anyway. Btw., bcp must be saved/restored since it may change
  //       due to GC.
  save_bcp();
#ifdef ASSERT
  {
    Label L;
    ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
    beqz(t0, L);
    stop("InterpreterMacroAssembler::call_VM_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
                               entry_point, number_of_arguments,
                               check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}

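// Record the type of obj in the single TypeEntries cell at mdo_addr.
// The cell holds a Klass* with its low bits reserved for the null_seen
// and type_unknown flags; once recorded types conflict, the entry
// degrades to type_unknown for good.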
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {
  assert_different_registers(obj, tmp, t0, mdo_addr.base());
  Label update, next, none;

  verify_oop(obj);

  bnez(obj, update);
  orptr(mdo_addr, TypeEntries::null_seen, t0, tmp);
  j(next);

  bind(update);
  load_klass(obj, obj);

  ld(t0, mdo_addr);
  xorr(obj, obj, t0);
  andi(t0, obj, TypeEntries::type_klass_mask);
  beqz(t0, next); // klass seen before, nothing to
                  // do. The unknown bit may have been
                  // set already but no need to check.

  andi(t0, obj, TypeEntries::type_unknown);
  bnez(t0, next);
  // already unknown. Nothing to do anymore.

  ld(t0, mdo_addr);
  beqz(t0, none);
  mv(tmp, (u1)TypeEntries::null_seen);
  beq(t0, tmp, none);
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just set the
  // profiled type to this obj's klass.
  ld(t0, mdo_addr);
  xorr(obj, obj, t0);
  andi(t0, obj, TypeEntries::type_klass_mask);
  beqz(t0, next);

  // different than before. Cannot keep accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown, t0, tmp);
  j(next);

  bind(none);
  // first time here. Set profile type.
  sd(obj, mdo_addr);

  bind(next);
}

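// Profile the argument types at a call site: walk up to
// TypeProfileArgsLimit entries of the CallTypeData/VirtualCallTypeData,
// record each argument's type, then advance mdp past the per-argument
// cells (and the return-type cell, if any).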
void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    lbu(t0, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
    if (is_virtual) {
      mv(tmp, (u1)DataLayout::virtual_call_type_data_tag);
      bne(t0, tmp, profile_continue);
    } else {
      mv(tmp, (u1)DataLayout::call_type_data_tag);
      bne(t0, tmp, profile_continue);
    }

    // calculate slot step
    static int stack_slot_offset0 = in_bytes(TypeEntriesAtCall::stack_slot_offset(0));
    static int slot_step = in_bytes(TypeEntriesAtCall::stack_slot_offset(1)) - stack_slot_offset0;

    // calculate type step
    static int argument_type_offset0 = in_bytes(TypeEntriesAtCall::argument_type_offset(0));
    static int type_step = in_bytes(TypeEntriesAtCall::argument_type_offset(1)) - argument_type_offset0;

    if (MethodData::profile_arguments()) {
      Label done, loop, loopEnd, profileArgument, profileReturnType;
      RegSet pushed_registers;
      pushed_registers += x15;
      pushed_registers += x16;
      pushed_registers += x17;
      Register mdo_addr = x15;
      Register index = x16;
      Register off_to_args = x17;
      push_reg(pushed_registers, sp);

      mv(off_to_args, in_bytes(TypeEntriesAtCall::args_data_offset()));
      mv(t0, TypeProfileArgsLimit);
      beqz(t0, loopEnd);

      mv(index, zr); // index < TypeProfileArgsLimit
      bind(loop);
      bgtz(index, profileReturnType);
      mv(t0, (int)MethodData::profile_return());
      beqz(t0, profileArgument); // (index > 0 || MethodData::profile_return()) == false
      bind(profileReturnType);
      // If the return value type is profiled, we may have no argument to profile.
      ld(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
      mv(t1, -TypeStackSlotEntries::per_arg_count());
      mul(t1, index, t1);
      add(tmp, tmp, t1);
      mv(t1, TypeStackSlotEntries::per_arg_count());
      add(t0, mdp, off_to_args);
      blt(tmp, t1, done);

      bind(profileArgument);

      ld(tmp, Address(callee, Method::const_offset()));
      load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
      // A stack offset o (zero-based) from the start of the argument
      // list translates, for n arguments, into offset n - o - 1 from
      // the end of the argument list.
      mv(t0, stack_slot_offset0);
      mv(t1, slot_step);
      mul(t1, index, t1);
      add(t0, t0, t1);
      add(t0, mdp, t0);
      ld(t0, Address(t0));
      sub(tmp, tmp, t0);
      addi(tmp, tmp, -1);
      Address arg_addr = argument_address(tmp);
      ld(tmp, arg_addr);

      mv(t0, argument_type_offset0);
      mv(t1, type_step);
      mul(t1, index, t1);
      add(t0, t0, t1);
      add(mdo_addr, mdp, t0);
      Address mdo_arg_addr(mdo_addr, 0);
      profile_obj_type(tmp, mdo_arg_addr, t1);

      int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
      addi(off_to_args, off_to_args, to_add);

      // increment index by 1
      addi(index, index, 1);
      mv(t1, TypeProfileArgsLimit);
      blt(index, t1, loop);
      bind(loopEnd);

      if (MethodData::profile_return()) {
        ld(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
        addi(tmp, tmp, -TypeProfileArgsLimit * TypeStackSlotEntries::per_arg_count());
      }

      add(t0, mdp, off_to_args);
      bind(done);
      mv(mdp, t0);

      // unspill the clobbered registers
      pop_reg(pushed_registers, sp);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        shadd(mdp, tmp, mdp, tmp, exact_log2(DataLayout::cell_size));
      }
      sd(mdp, Address(fp, frame::interpreter_frame_mdp_offset * wordSize));
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

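// Record the type of the method's return value, held in 'ret', in the
// ReturnTypeEntry that sits just before the current mdp.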
void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, xbcp, t0, t1);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      Label do_profile;
      lbu(t0, Address(xbcp, 0));
      mv(tmp, (u1)Bytecodes::_invokedynamic);
      beq(t0, tmp, do_profile);
      mv(tmp, (u1)Bytecodes::_invokehandle);
      beq(t0, tmp, do_profile);
      get_method(tmp);
      lhu(t0, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
      mv(t1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
      bne(t0, t1, profile_continue);
      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mv(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr, t1);

    bind(profile_continue);
  }
}

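// Profile the types of the received parameters, if the MDO has a
// ParametersTypeData area, walking from the last parameter down.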
void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2, Register tmp3) {
  assert_different_registers(t0, t1, mdp, tmp1, tmp2, tmp3);
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative, we're not profiling any parameters.
    lwu(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    srli(tmp2, tmp1, 31);
    bnez(tmp2, profile_continue);  // i.e. sign bit set

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameter. Collect profiling from last parameter down.
    // mdo start + parameters offset + array length - 1
    add(mdp, mdp, tmp1);
    ld(tmp1, Address(mdp, ArrayData::array_len_offset()));
    add(tmp1, tmp1, -TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    int per_arg_scale = exact_log2(DataLayout::cell_size);
    add(t0, mdp, off_base);
    add(t1, mdp, type_base);

    shadd(tmp2, tmp1, t0, tmp2, per_arg_scale);
    // load offset on the stack from the slot for this parameter
    ld(tmp2, Address(tmp2, 0));
    neg(tmp2, tmp2);

    // read the parameter from the local area
    shadd(tmp2, tmp2, xlocals, tmp2, Interpreter::logStackElementSize);
    ld(tmp2, Address(tmp2, 0));

    // profile the parameter
    shadd(t1, tmp1, t1, t0, per_arg_scale);
    Address arg_type(t1, 0);
    profile_obj_type(tmp2, arg_type, tmp3);

    // go to next parameter
    add(tmp1, tmp1, -TypeStackSlotEntries::per_arg_count());
    bgez(tmp1, loop);

    bind(profile_continue);
  }
}

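// Load the MethodCounters for 'method' into 'mcs', allocating them
// lazily through the runtime on first use; branch to 'skip' if the
// allocation fails (OutOfMemory).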
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ld(mcs, Address(method, Method::method_counters_offset()));
  bnez(mcs, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ld(mcs, Address(method, Method::method_counters_offset()));
  beqz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}

#ifdef ASSERT
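// Debug-only check on 'access_flags': with stop_by_hit, stop with 'msg'
// when any of 'flag_bits' is set; otherwise stop when none is set.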
void InterpreterMacroAssembler::verify_access_flags(Register access_flags, uint32_t flag_bits,
                                                    const char* msg, bool stop_by_hit) {
  Label L;
  andi(t0, access_flags, flag_bits);
  if (stop_by_hit) {
    beqz(t0, L);
  } else {
    bnez(t0, L);
  }
  stop(msg);
  bind(L);
}

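// Debug-only check that the interpreter frame was set up consistently:
// with no monitors allocated, the expression stack pointer must equal
// the top of the monitor block.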
void InterpreterMacroAssembler::verify_frame_setup() {
  Label L;
  const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  ld(t0, monitor_block_top);
  beq(esp, t0, L);
  stop("broken stack frame setup in interpreter");
  bind(L);
}
#endif