/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interp_masm_ppc.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

// Implementation of InterpreterMacroAssembler.

// This file specializes the assembler with interpreter-specific macros.

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif

void InterpreterMacroAssembler::null_check_throw(Register a, int offset, Register temp_reg) {
  address exception_entry = Interpreter::throw_NullPointerException_entry();
  MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
}

void InterpreterMacroAssembler::load_klass_check_null_throw(Register dst, Register src, Register temp_reg) {
  null_check_throw(src, oopDesc::klass_offset_in_bytes(), temp_reg);
  load_klass(dst, src);
}

void InterpreterMacroAssembler::jump_to_entry(address entry, Register Rscratch) {
  assert(entry, "Entry must have been generated by now");
  if (is_within_range_of_b(entry, pc())) {
    b(entry);
  } else {
    load_const_optimized(Rscratch, entry, R0);
    mtctr(Rscratch);
    bctr();
  }
}

void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr, bool generate_poll) {
  Register bytecode = R12_scratch2;
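  // Load the next bytecode. With a nonzero increment, lbzu fetches the byte
  // and advances R14_bcp in a single instruction.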
  if (bcp_incr != 0) {
    lbzu(bytecode, bcp_incr, R14_bcp);
  } else {
    lbz(bytecode, 0, R14_bcp);
  }

  dispatch_Lbyte_code(state, bytecode, Interpreter::dispatch_table(state), generate_poll);
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // Load current bytecode.
  Register bytecode = R12_scratch2;
  lbz(bytecode, 0, R14_bcp);
  dispatch_Lbyte_code(state, bytecode, table);
}

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in R24_dispatch_addr.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  Register bytecode = R12_scratch2;
  lbz(bytecode, bcp_incr, R14_bcp);

  load_dispatch_table(R24_dispatch_addr, Interpreter::dispatch_table(state));

  sldi(bytecode, bytecode, LogBytesPerWord);
  ldx(R24_dispatch_addr, R24_dispatch_addr, bytecode);
}

// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in R24_dispatch_addr is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  if (bcp_incr) { addi(R14_bcp, R14_bcp, bcp_incr); }
  mtctr(R24_dispatch_addr);
  bcctr(bcondAlways, 0, bhintbhBCCTRisNotPredictable);
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  assert(scratch_reg != R0, "can't use R0 as scratch_reg here");
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread.
    lwz(scratch_reg, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Initiate popframe handling only if it is not already being
    // processed. If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    andi_(R0, scratch_reg, JavaThread::popframe_pending_bit);
    beq(CCR0, L);

    andi_(R0, scratch_reg, JavaThread::popframe_processing_bit);
    bne(CCR0, L);

    // Call the Interpreter::remove_activation_preserving_args_entry()
    // func to get the address of the same-named entrypoint in the
    // generated interpreter code.
#if defined(ABI_ELFv2)
    call_c(CAST_FROM_FN_PTR(address,
                            Interpreter::remove_activation_preserving_args_entry),
           relocInfo::none);
#else
    call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
                            Interpreter::remove_activation_preserving_args_entry),
           relocInfo::none);
#endif

    // Jump to Interpreter::_remove_activation_preserving_args_entry.
    mtctr(R3_RET);
    bctr();

    align(32, 12);
    bind(L);
  }
}

void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  const Register Rthr_state_addr = scratch_reg;
  if (JvmtiExport::can_force_early_return()) {
    Label Lno_early_ret;
    ld(Rthr_state_addr, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
    cmpdi(CCR0, Rthr_state_addr, 0);
    beq(CCR0, Lno_early_ret);

    lwz(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rthr_state_addr);
    cmpwi(CCR0, R0, JvmtiThreadState::earlyret_pending);
    bne(CCR0, Lno_early_ret);

    // Jump to Interpreter::_earlyret_entry.
    lwz(R3_ARG1, in_bytes(JvmtiThreadState::earlyret_tos_offset()), Rthr_state_addr);
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry));
    mtlr(R3_RET);
    blr();

    align(32, 12);
    bind(Lno_early_ret);
  }
}

void InterpreterMacroAssembler::load_earlyret_value(TosState state, Register Rscratch1) {
  const Register RjvmtiState = Rscratch1;
  const Register Rscratch2   = R0;

  ld(RjvmtiState, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
  li(Rscratch2, 0);

  switch (state) {
    case atos: ld(R17_tos, in_bytes(JvmtiThreadState::earlyret_oop_offset()), RjvmtiState);
               std(Rscratch2, in_bytes(JvmtiThreadState::earlyret_oop_offset()), RjvmtiState);
               break;
    case ltos: ld(R17_tos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
               break;
    case btos: // fall through
    case ztos: // fall through
    case ctos: // fall through
    case stos: // fall through
    case itos: lwz(R17_tos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
               break;
    case ftos: lfs(F15_ftos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
               break;
    case dtos: lfd(F15_ftos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
               break;
    case vtos: break;
    default  : ShouldNotReachHere();
  }

  // Clean up tos value in the jvmti thread state.
  std(Rscratch2, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
  // Set tos state field to illegal value.
  li(Rscratch2, ilgl);
  stw(Rscratch2, in_bytes(JvmtiThreadState::earlyret_tos_offset()), RjvmtiState);
}

// Common code to dispatch and dispatch_only.
// Dispatch value in Lbyte_code and increment Lbcp.

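// The dispatch tables are laid out at fixed offsets from the base table that
// R25_templateTableBase points to, so most tables can be reached with a single
// addi instead of materializing a full 64-bit constant.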
void InterpreterMacroAssembler::load_dispatch_table(Register dst, address* table) {
  address table_base = (address)Interpreter::dispatch_table((TosState)0);
  intptr_t table_offs = (intptr_t)table - (intptr_t)table_base;
  if (is_simm16(table_offs)) {
    addi(dst, R25_templateTableBase, (int)table_offs);
  } else {
    load_const_optimized(dst, table, R0);
  }
}

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register bytecode,
                                                    address* table, bool generate_poll) {
  assert_different_registers(bytecode, R11_scratch1);

  // Calc dispatch table address.
  load_dispatch_table(R11_scratch1, table);

  if (generate_poll) {
    address *sfpt_tbl = Interpreter::safept_table(state);
    if (table != sfpt_tbl) {
      Label dispatch;
      ld(R0, in_bytes(JavaThread::polling_word_offset()), R16_thread);
      // An armed page has the poll_bit set; if the poll bit is cleared, just continue.
      andi_(R0, R0, SafepointMechanism::poll_bit());
      beq(CCR0, dispatch);
      load_dispatch_table(R11_scratch1, sfpt_tbl);
      align(32, 16);
      bind(dispatch);
    }
  }

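  // Each table entry is one address wide: scale the bytecode by the word size
  // (LogBytesPerWord) and load the target handler from the table.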
  sldi(R12_scratch2, bytecode, LogBytesPerWord);
  ldx(R11_scratch1, R11_scratch1, R12_scratch2);

  // Jump off!
  mtctr(R11_scratch1);
  bcctr(bcondAlways, 0, bhintbhBCCTRisNotPredictable);
}

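// The receiver lives Rparam_count stack slots above R15_esp (esp points to the
// free slot just below the outgoing arguments).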
void InterpreterMacroAssembler::load_receiver(Register Rparam_count, Register Rrecv_dst) {
  sldi(Rrecv_dst, Rparam_count, Interpreter::logStackElementSize);
  ldx(Rrecv_dst, Rrecv_dst, R15_esp);
}

// helpers for expression stack
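//
// The expression stack grows towards lower addresses. R15_esp points to the
// free slot below the top of stack: pushes store at 0(R15_esp) and then
// decrement, pops use load-with-update forms (e.g. lwzu) with a positive
// displacement.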

void InterpreterMacroAssembler::pop_i(Register r) {
  lwzu(r, Interpreter::stackElementSize, R15_esp);
}

void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldu(r, Interpreter::stackElementSize, R15_esp);
}

void InterpreterMacroAssembler::pop_l(Register r) {
  ld(r, Interpreter::stackElementSize, R15_esp);
  addi(R15_esp, R15_esp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::pop_f(FloatRegister f) {
  lfsu(f, Interpreter::stackElementSize, R15_esp);
}

void InterpreterMacroAssembler::pop_d(FloatRegister f) {
  lfd(f, Interpreter::stackElementSize, R15_esp);
  addi(R15_esp, R15_esp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_i(Register r) {
  stw(r, 0, R15_esp);
  addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  std(r, 0, R15_esp);
  addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
}

void InterpreterMacroAssembler::push_l(Register r) {
  // Clear unused slot.
  load_const_optimized(R0, 0L);
  std(R0, 0, R15_esp);
  std(r, - Interpreter::stackElementSize, R15_esp);
  addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
}

void InterpreterMacroAssembler::push_f(FloatRegister f) {
  stfs(f, 0, R15_esp);
  addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
}

void InterpreterMacroAssembler::push_d(FloatRegister f)   {
  stfd(f, - Interpreter::stackElementSize, R15_esp);
  addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
}

void InterpreterMacroAssembler::push_2ptrs(Register first, Register second) {
  std(first, 0, R15_esp);
  std(second, -Interpreter::stackElementSize, R15_esp);
  addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
}

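// Bitwise moves between a GPR and an FPR (no value conversion). If the CPU
// lacks the direct move instructions (mtfprd/mffprd), the value takes a round
// trip through the free expression stack slot at 0(R15_esp).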
void InterpreterMacroAssembler::move_l_to_d(Register l, FloatRegister d) {
  if (VM_Version::has_mtfprd()) {
    mtfprd(d, l);
  } else {
    std(l, 0, R15_esp);
    lfd(d, 0, R15_esp);
  }
}

void InterpreterMacroAssembler::move_d_to_l(FloatRegister d, Register l) {
  if (VM_Version::has_mtfprd()) {
    mffprd(l, d);
  } else {
    stfd(d, 0, R15_esp);
    ld(l, 0, R15_esp);
  }
}

void InterpreterMacroAssembler::push(TosState state) {
  switch (state) {
    case atos: push_ptr();                break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: push_i();                  break;
    case ltos: push_l();                  break;
    case ftos: push_f();                  break;
    case dtos: push_d();                  break;
    case vtos: /* nothing to do */        break;
    default  : ShouldNotReachHere();
  }
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr();            break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: pop_i();              break;
    case ltos: pop_l();              break;
    case ftos: pop_f();              break;
    case dtos: pop_d();              break;
    case vtos: /* nothing to do */   break;
    default  : ShouldNotReachHere();
  }
  verify_oop(R17_tos, state);
}

void InterpreterMacroAssembler::empty_expression_stack() {
  addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int         bcp_offset,
                                                          Register    Rdst,
                                                          signedOrNot is_signed) {
#if defined(VM_LITTLE_ENDIAN)
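  // Bytecode operands are stored in Java (big-endian) order; on little-endian
  // machines we use a byte-reversing load (lhbrx) instead.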
  if (bcp_offset) {
    load_const_optimized(Rdst, bcp_offset);
    lhbrx(Rdst, R14_bcp, Rdst);
  } else {
    lhbrx(Rdst, R14_bcp);
  }
  if (is_signed == Signed) {
    extsh(Rdst, Rdst);
  }
#else
  // Read Java big endian format.
  if (is_signed == Signed) {
    lha(Rdst, bcp_offset, R14_bcp);
  } else {
    lhz(Rdst, bcp_offset, R14_bcp);
  }
#endif
}

void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int         bcp_offset,
                                                          Register    Rdst,
                                                          signedOrNot is_signed) {
#if defined(VM_LITTLE_ENDIAN)
  if (bcp_offset) {
    load_const_optimized(Rdst, bcp_offset);
    lwbrx(Rdst, R14_bcp, Rdst);
  } else {
    lwbrx(Rdst, R14_bcp);
  }
  if (is_signed == Signed) {
    extsw(Rdst, Rdst);
  }
#else
  // Read Java big endian format.
  if (bcp_offset & 3) { // Offset unaligned?
    load_const_optimized(Rdst, bcp_offset);
    if (is_signed == Signed) {
      lwax(Rdst, R14_bcp, Rdst);
    } else {
      lwzx(Rdst, R14_bcp, Rdst);
    }
  } else {
    if (is_signed == Signed) {
      lwa(Rdst, bcp_offset, R14_bcp);
    } else {
      lwz(Rdst, bcp_offset, R14_bcp);
    }
  }
#endif
}

// Load the constant pool cache index from the bytecode stream.
//
// Kills / writes:
//   - Rdst
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  // Cache index is always in the native format, courtesy of Rewriter.
  if (index_size == sizeof(u2)) {
    lhz(Rdst, bcp_offset, R14_bcp);
  } else if (index_size == sizeof(u4)) {
    if (bcp_offset & 3) {
      load_const_optimized(Rdst, bcp_offset);
      lwax(Rdst, R14_bcp, Rdst);
    } else {
      lwa(Rdst, bcp_offset, R14_bcp);
    }
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
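    // nand(x, x, x) computes ~x, undoing the bitwise complement the Rewriter
    // uses to encode invokedynamic indices (see the assert above).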
    nand(Rdst, Rdst, Rdst); // convert to plain index
  } else if (index_size == sizeof(u1)) {
    lbz(Rdst, bcp_offset, R14_bcp);
  } else {
    ShouldNotReachHere();
  }
  // Rdst now contains cp cache index.
}

void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, int bcp_offset,
                                                           size_t index_size) {
  get_cache_index_at_bcp(cache, bcp_offset, index_size);
  sldi(cache, cache, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord));
  add(cache, R27_constPoolCache, cache);
}

// Load 4-byte signed or unsigned integer in Java format (that is, big-endian format)
// from (Rsrc)+offset.
void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
                                       signedOrNot is_signed) {
#if defined(VM_LITTLE_ENDIAN)
  if (offset) {
    load_const_optimized(Rdst, offset);
    lwbrx(Rdst, Rdst, Rsrc);
  } else {
    lwbrx(Rdst, Rsrc);
  }
  if (is_signed == Signed) {
    extsw(Rdst, Rdst);
  }
#else
  if (is_signed == Signed) {
    lwa(Rdst, offset, Rsrc);
  } else {
    lwz(Rdst, offset, Rsrc);
  }
#endif
}

void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
  // Get index out of bytecode pointer, get_cache_entry_pointer_at_bcp
  get_cache_index_at_bcp(index, 1, sizeof(u4));

  // Get address of invokedynamic array
  ld_ptr(cache, in_bytes(ConstantPoolCache::invokedynamic_entries_offset()), R27_constPoolCache);
  // Scale the index to be the entry index * sizeof(ResolvedIndyEntry)
  sldi(index, index, log2i_exact(sizeof(ResolvedIndyEntry)));
  // Skip the Array<ResolvedIndyEntry> header to reach the first entry.
  addi(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
  add(cache, cache, index);
}

void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
  // Get index out of bytecode pointer
  get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
  // Take shortcut if the size is a power of 2
  if (is_power_of_2(sizeof(ResolvedFieldEntry))) {
    // Scale index by power of 2
    sldi(index, index, log2i_exact(sizeof(ResolvedFieldEntry)));
  } else {
    // Scale the index to be the entry index * sizeof(ResolvedFieldEntry)
    mulli(index, index, sizeof(ResolvedFieldEntry));
  }
  // Get address of field entries array
  ld_ptr(cache, in_bytes(ConstantPoolCache::field_entries_offset()), R27_constPoolCache);
  addi(cache, cache, Array<ResolvedFieldEntry>::base_offset_in_bytes());
  add(cache, cache, index);
}

// Load object from cpool->resolved_references(index).
// Kills:
//   - index
void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index,
                                                                 Register tmp1, Register tmp2,
                                                                 Label *L_handle_null) {
  assert_different_registers(result, index, tmp1, tmp2);
  assert(index->is_nonvolatile(), "needs to survive C-call in resolve_oop_handle");
  get_constant_pool(result);

  // Convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed.
  sldi(index, index, LogBytesPerHeapOop);
  // Load pointer for resolved_references[] objArray.
  ld(result, ConstantPool::cache_offset(), result);
  ld(result, ConstantPoolCache::resolved_references_offset(), result);
  resolve_oop_handle(result, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE);
#ifdef ASSERT
  Label index_ok;
  lwa(R0, arrayOopDesc::length_offset_in_bytes(), result);
  sldi(R0, R0, LogBytesPerHeapOop);
  cmpd(CCR0, index, R0);
  blt(CCR0, index_ok);
  stop("resolved reference index out of bounds");
  bind(index_ok);
#endif
  // Add in the index.
  add(result, index, result);
  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result,
                tmp1, tmp2,
                MacroAssembler::PRESERVATION_NONE,
                0, L_handle_null);
}

// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass) {
  // int value = *(Rcpool->int_at_addr(which));
  // int resolved_klass_index = extract_low_short_from_int(value);
  add(Roffset, Rcpool, Roffset);
#if defined(VM_LITTLE_ENDIAN)
  lhz(Roffset, sizeof(ConstantPool), Roffset);     // Roffset = resolved_klass_index
#else
  lhz(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
#endif

  ld(Rklass, ConstantPool::resolved_klasses_offset(), Rcpool); // Rklass = Rcpool->_resolved_klasses

  sldi(Roffset, Roffset, LogBytesPerWord);
  addi(Roffset, Roffset, Array<Klass*>::base_offset_in_bytes());
  isync(); // Order load of instance Klass wrt. tags.
  ldx(Rklass, Rklass, Roffset);
}

void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
                                                              Register cache,
                                                              Register method) {
  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == TemplateTable::f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));

  ld(method, method_offset, cache); // get f1 Method*
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,
                                                  Register Rtmp2, Register Rtmp3, Label &ok_is_subtype) {
  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1, Rtmp2);
  check_klass_subtype(Rsub_klass, Rsuper_klass, Rtmp1, Rtmp2, ok_is_subtype);
  profile_typecheck_failed(Rtmp1, Rtmp2);
}

// Separate these two to allow for delay slot in middle.
// These are used to do a test and full jump to exception-throwing code.

// Check that index is in range for array, then shift index by index_shift,
// and put arrayOop + shifted_index into res.
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Register Rindex,
                                                        int index_shift, Register Rtmp, Register Rres) {
  // Check that index is in range for array, then shift index by index_shift,
  // and put arrayOop + shifted_index into res.
  // Note: res is still shy of address by array offset into object.
  // Kills:
  //   - Rindex
  // Writes:
  //   - Rres: Address that corresponds to the array index if check was successful.
  verify_oop(Rarray);
  const Register Rlength   = R0;
  const Register RsxtIndex = Rtmp;
  Label LisNull, LnotOOR;

  // Array nullcheck
  if (!ImplicitNullChecks) {
    cmpdi(CCR0, Rarray, 0);
    beq(CCR0, LisNull);
  } else {
    null_check_throw(Rarray, arrayOopDesc::length_offset_in_bytes(), /*temp*/RsxtIndex);
  }

  // Rindex might contain garbage in upper bits (remember that we don't sign extend
  // during integer arithmetic operations). So kill them and put the value into the
  // same register in which ArrayIndexOutOfBounds expects the index.
  rldicl(RsxtIndex, Rindex, 0, 32); // zero extend 32 bit -> 64 bit

  // Index check
  lwz(Rlength, arrayOopDesc::length_offset_in_bytes(), Rarray);
  cmplw(CCR0, Rindex, Rlength);
  sldi(RsxtIndex, RsxtIndex, index_shift);
  blt(CCR0, LnotOOR);
  // Index should be in R17_tos, array should be in R4_ARG2.
  mr_if_needed(R17_tos, Rindex);
  mr_if_needed(R4_ARG2, Rarray);
  load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  mtctr(Rtmp);
  bctr();

  if (!ImplicitNullChecks) {
    bind(LisNull);
    load_dispatch_table(Rtmp, (address*)Interpreter::_throw_NullPointerException_entry);
    mtctr(Rtmp);
    bctr();
  }

  align(32, 16);
  bind(LnotOOR);

  // Calc address
  add(Rres, RsxtIndex, Rarray);
}

void InterpreterMacroAssembler::index_check(Register array, Register index,
                                            int index_shift, Register tmp, Register res) {
  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}

void InterpreterMacroAssembler::get_const(Register Rdst) {
  ld(Rdst, in_bytes(Method::const_offset()), R19_method);
}

void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_const(Rdst);
  ld(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
}

void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld(Rdst, ConstantPool::cache_offset(), Rdst);
}

void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld(Rtags, ConstantPool::tags_offset(), Rcpool);
}

// Unlock if synchronized method.
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//   If throw_monitor_exception
//     throws IllegalMonitorStateException
//   Else if install_monitor_exception
//     installs IllegalMonitorStateException
//   Else
//     no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label Lunlocked, Lno_unlock;
  {
    Register Rdo_not_unlock_flag = R11_scratch1;
    Register Raccess_flags       = R12_scratch2;

    // Check if synchronized method or unlocking prevented by
    // JavaThread::do_not_unlock_if_synchronized flag.
    lbz(Rdo_not_unlock_flag, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    lwz(Raccess_flags, in_bytes(Method::access_flags_offset()), R19_method);
    li(R0, 0);
    stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread); // reset flag

    push(state);

    // Skip if we don't have to unlock.
    rldicl_(R0, Raccess_flags, 64-JVM_ACC_SYNCHRONIZED_BIT, 63); // Extract bit and compare to 0.
    beq(CCR0, Lunlocked);

    cmpwi(CCR0, Rdo_not_unlock_flag, 0);
    bne(CCR0, Lno_unlock);
  }

  // Unlock
  {
    Register Rmonitor_base = R11_scratch1;

    Label Lunlock;
    // If it's still locked, everything is ok, unlock it.
    ld(Rmonitor_base, 0, R1_SP);
    addi(Rmonitor_base, Rmonitor_base,
         -(frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base

    ld(R0, BasicObjectLock::obj_offset(), Rmonitor_base);
    cmpdi(CCR0, R0, 0);
    bne(CCR0, Lunlock);

    // If it's already unlocked, throw exception.
    if (throw_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
        b(Lunlocked);
      }
    }

    bind(Lunlock);
    unlock_object(Rmonitor_base);
  }

  // Check that all other monitors are unlocked. Throw IllegalMonitorState exception if not.
  bind(Lunlocked);
  {
    Label Lexception, Lrestart;
    Register Rcurrent_obj_addr = R11_scratch1;
    const int delta = frame::interpreter_frame_monitor_size_in_bytes();
    assert((delta & LongAlignmentMask) == 0, "sizeof BasicObjectLock must be even number of doublewords");

    bind(Lrestart);
    // Set up search loop: Calc num of iterations.
    {
      Register Riterations = R12_scratch2;
      Register Rmonitor_base = Rcurrent_obj_addr;
      ld(Rmonitor_base, 0, R1_SP);
      addi(Rmonitor_base, Rmonitor_base, - frame::ijava_state_size);  // Monitor base

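      // Riterations = monitor base - R26_monitor is the byte size of the
      // monitor area; zero or negative means there are no monitors to check.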
      subf_(Riterations, R26_monitor, Rmonitor_base);
      ble(CCR0, Lno_unlock);

      addi(Rcurrent_obj_addr, Rmonitor_base,
           in_bytes(BasicObjectLock::obj_offset()) - frame::interpreter_frame_monitor_size_in_bytes());
      // Check if any monitor is on stack, bail out if not
      srdi(Riterations, Riterations, exact_log2(delta));
      mtctr(Riterations);
    }

    // The search loop: Look for locked monitors.
    {
      const Register Rcurrent_obj = R0;
      Label Lloop;

      ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
      addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
      bind(Lloop);

      // Check if current entry is used.
      cmpdi(CCR0, Rcurrent_obj, 0);
      bne(CCR0, Lexception);
      // Preload next iteration's compare value.
      ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
      addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
      bdnz(Lloop);
    }
    // Fell through: Everything's unlocked => finish.
    b(Lno_unlock);

    // An object is still locked => need to throw exception.
    bind(Lexception);
    if (throw_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      Register Rmonitor_addr = R11_scratch1;
      addi(Rmonitor_addr, Rcurrent_obj_addr, -in_bytes(BasicObjectLock::obj_offset()) + delta);
      unlock_object(Rmonitor_addr);
      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      b(Lrestart);
    }
  }

  align(32, 12);
  bind(Lno_unlock);
  pop(state);
}

// Support function for remove_activation & Co.
void InterpreterMacroAssembler::merge_frames(Register Rsender_sp, Register return_pc,
                                             Register Rscratch1, Register Rscratch2) {
  // Pop interpreter frame.
  ld(Rscratch1, 0, R1_SP); // *SP
  ld(Rsender_sp, _ijava_state_neg(sender_sp), Rscratch1); // top_frame_sp
  ld(Rscratch2, 0, Rscratch1); // **SP
  if (return_pc!=noreg) {
    ld(return_pc, _abi0(lr), Rscratch1); // LR
  }

  // Merge top frames.
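  // stdux stores the caller's back chain (**SP) and updates R1_SP in a single
  // instruction, so the stack stays walkable at every point.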
  subf(Rscratch1, R1_SP, Rsender_sp); // top_frame_sp - SP
  stdux(Rscratch2, R1_SP, Rscratch1); // atomically set *(SP = top_frame_sp) = **SP
}

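// Narrow the 32-bit result in the given register to the method's declared
// return type. Equivalent pseudo code:
//   switch (result_type) {
//     case T_BOOLEAN: result &= 1;               break;
//     case T_BYTE:    result = (jbyte)  result;  break;
//     case T_CHAR:    result = (jchar)  result;  break;
//     case T_SHORT:   result = (jshort) result;  break;
//     default:        break; // T_INT: nothing to do
//   }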
void InterpreterMacroAssembler::narrow(Register result) {
  Register ret_type = R11_scratch1;
  ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
  lbz(ret_type, in_bytes(ConstMethod::result_type_offset()), R11_scratch1);

  Label notBool, notByte, notChar, done;

  // common case first
  cmpwi(CCR0, ret_type, T_INT);
  beq(CCR0, done);

  cmpwi(CCR0, ret_type, T_BOOLEAN);
  bne(CCR0, notBool);
  andi(result, result, 0x1);
  b(done);

  bind(notBool);
  cmpwi(CCR0, ret_type, T_BYTE);
  bne(CCR0, notByte);
  extsb(result, result);
  b(done);

  bind(notByte);
  cmpwi(CCR0, ret_type, T_CHAR);
  bne(CCR0, notChar);
  andi(result, result, 0xffff);
  b(done);

  bind(notChar);
  // cmpwi(CCR0, ret_type, T_SHORT);  // all that's left
  // bne(CCR0, done);
  extsh(result, result);

  // Nothing to do for T_INT
  bind(done);
}

// Remove activation.
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {
  BLOCK_COMMENT("remove_activation {");

  // The poll below is for the stack watermark barrier. It allows frames that would
  // normally not be safe to use to be fixed up lazily. Returns into such unsafe
  // territory of the stack will call InterpreterRuntime::at_unwind.
  Label slow_path;
  Label fast_path;
  safepoint_poll(slow_path, R11_scratch1, true /* at_return */, false /* in_nmethod */);
  b(fast_path);
  bind(slow_path);
  push(state);
  set_last_Java_frame(R1_SP, noreg);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), R16_thread);
  reset_last_Java_frame();
  pop(state);
  align(32);
  bind(fast_path);

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
  notify_method_exit(false, state, NotifyJVMTI, true);

  BLOCK_COMMENT("reserved_stack_check:");
  if (StackReservedPages > 0) {
    // Test if reserved zone needs to be enabled.
    Label no_reserved_zone_enabling;

    // check if already enabled - if so no re-enabling needed
    assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
    lwz(R0, in_bytes(JavaThread::stack_guard_state_offset()), R16_thread);
    cmpwi(CCR0, R0, StackOverflow::stack_guard_enabled);
    beq_predict_taken(CCR0, no_reserved_zone_enabling);

    // Compare frame pointers. There is no good stack pointer, as with stack
    // frame compression we can get different SPs when we do calls. A subsequent
    // call could have a smaller SP, so that this compare succeeds for an
    // inner call of the method annotated with ReservedStack.
    ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
    ld_ptr(R11_scratch1, _abi0(callers_sp), R1_SP); // Load frame pointer.
    cmpld(CCR0, R11_scratch1, R0);
    blt_predict_taken(CCR0, no_reserved_zone_enabling);

    // Enable reserved zone again, throw stack overflow exception.
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError));

    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }

  verify_oop(R17_tos, state);

  merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
  mtlr(R0);
  pop_cont_fastpath();
  BLOCK_COMMENT("} remove_activation");
}

// Lock object
//
// Registers alive
//   monitor - Address of the BasicObjectLock to be used for locking,
//             which must be initialized with the object to lock.
//   object  - Address of the object to be locked.
//
void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
  if (LockingMode == LM_MONITOR) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
  } else {
    // template code (for LM_LEGACY):
    //
    // markWord displaced_header = obj->mark().set_unlocked();
    // monitor->lock()->set_displaced_header(displaced_header);
    // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
    //   // We stored the monitor address into the object's mark word.
    // } else if (THREAD->is_lock_owned((address)displaced_header))
    //   // Simple recursive case.
    //   monitor->lock()->set_displaced_header(nullptr);
    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorenter(THREAD, monitor);
    // }

    const Register header           = R7_ARG5;
    const Register object_mark_addr = R8_ARG6;
    const Register current_header   = R9_ARG7;
    const Register tmp              = R10_ARG8;

    Label count_locking, done;
    Label cas_failed, slow_case;

    assert_different_registers(header, object_mark_addr, current_header, tmp);

    // markWord displaced_header = obj->mark().set_unlocked();

    // Load markWord from object into header.
    ld(header, oopDesc::mark_offset_in_bytes(), object);

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp, object);
      lwz(tmp, in_bytes(Klass::access_flags_offset()), tmp);
      testbitdi(CCR0, R0, tmp, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
      bne(CCR0, slow_case);
    }

    if (LockingMode == LM_LIGHTWEIGHT) {
      fast_lock(object, /* mark word */ header, tmp, slow_case);
      b(count_locking);
    } else if (LockingMode == LM_LEGACY) {

      // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
      ori(header, header, markWord::unlocked_value);

      // monitor->lock()->set_displaced_header(displaced_header);
      const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
      const int mark_offset = lock_offset +
                              BasicLock::displaced_header_offset_in_bytes();

      // Initialize the box (Must happen before we update the object mark!).
      std(header, mark_offset, monitor);

      // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {

      // Store stack address of the BasicObjectLock (this is monitor) into object.
      addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());

      // Must fence, otherwise, preceding store(s) may float below cmpxchg.
      // CmpxchgX sets CCR0 to cmpX(current, displaced).
      cmpxchgd(/*flag=*/CCR0,
               /*current_value=*/current_header,
               /*compare_value=*/header, /*exchange_value=*/monitor,
               /*where=*/object_mark_addr,
               MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
               MacroAssembler::cmpxchgx_hint_acquire_lock(),
               noreg,
               &cas_failed,
               /*check without membar and ldarx first*/true);

      // If the compare-and-exchange succeeded, then we found an unlocked
      // object and we have now locked it.
      b(count_locking);
      bind(cas_failed);

      // } else if (THREAD->is_lock_owned((address)displaced_header))
      //   // Simple recursive case.
      //   monitor->lock()->set_displaced_header(nullptr);

      // We did not see an unlocked object so try the fast recursive case.

      // Check if owner is self by comparing the value in the markWord of object
      // (current_header) with the stack pointer.
      sub(current_header, current_header, R1_SP);

      assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
      load_const_optimized(tmp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
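      // The difference qualifies as a recursive enter iff it fits within one
      // page and the two mark word lock bits are clear, i.e. the object is
      // stack-locked by a frame close to our SP.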

      and_(R0/*==0?*/, current_header, tmp);
      // If condition is true we are done and hence we can store 0 in the displaced
      // header indicating it is a recursive lock.
      bne(CCR0, slow_case);
      std(R0/*==0!*/, mark_offset, monitor);
      b(count_locking);
    }

    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorenter(THREAD, monitor);

    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter.
    bind(slow_case);
    if (LockingMode == LM_LIGHTWEIGHT) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj), object);
    } else {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
    }
    b(done);
    // }
    align(32, 12);
    bind(count_locking);
    inc_held_monitor_count(current_header /*tmp*/);
    bind(done);
  }
}

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Registers alive
//   monitor - Address of the BasicObjectLock to be used for locking,
//             which must be initialized with the object to lock.
//
// Throws IllegalMonitorStateException if the object is not locked by the current thread.
void InterpreterMacroAssembler::unlock_object(Register monitor) {
  if (LockingMode == LM_MONITOR) {
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
  } else {

    // template code (for LM_LEGACY):
    //
    // if ((displaced_header = monitor->displaced_header()) == nullptr) {
    //   // Recursive unlock. Mark the monitor unlocked by setting the object field to null.
    //   monitor->set_obj(nullptr);
    // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
    //   // We swapped the unlocked mark in displaced_header into the object's mark word.
    //   monitor->set_obj(nullptr);
    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorexit(monitor);
    // }

    const Register object           = R7_ARG5;
    const Register header           = R8_ARG6;
    const Register object_mark_addr = R9_ARG7;
    const Register current_header   = R10_ARG8;

    Label free_slot;
    Label slow_case;

    assert_different_registers(object, header, object_mark_addr, current_header);

    if (LockingMode != LM_LIGHTWEIGHT) {
      // Test first if we are in the fast recursive case.
      ld(header, in_bytes(BasicObjectLock::lock_offset()) +
                 BasicLock::displaced_header_offset_in_bytes(), monitor);

      // If the displaced header is zero, we have a recursive unlock.
      cmpdi(CCR0, header, 0);
      beq(CCR0, free_slot); // recursive unlock
    }

    // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
    //   // We swapped the unlocked mark in displaced_header into the object's mark word.
    //   monitor->set_obj(nullptr);

    // If we still have a lightweight lock, unlock the object and be done.

    // The object address from the monitor is in object.
    ld(object, in_bytes(BasicObjectLock::obj_offset()), monitor);

    if (LockingMode == LM_LIGHTWEIGHT) {
      // Check for non-symmetric locking. This is allowed by the spec and the interpreter
      // must handle it.
      Register tmp = current_header;
      // First check for lock-stack underflow.
      lwz(tmp, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
      cmplwi(CCR0, tmp, (unsigned)LockStack::start_offset());
      ble(CCR0, slow_case);
      // Then check if the top of the lock-stack matches the unlocked object.
      addi(tmp, tmp, -oopSize);
      ldx(tmp, tmp, R16_thread);
      cmpd(CCR0, tmp, object);
      bne(CCR0, slow_case);

      ld(header, oopDesc::mark_offset_in_bytes(), object);
      andi_(R0, header, markWord::monitor_value);
      bne(CCR0, slow_case);
      fast_unlock(object, header, slow_case);
    } else {
      addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());

      // We have the displaced header in displaced_header. If the lock is still
      // lightweight, it will contain the monitor address and we'll store the
      // displaced header back into the object's mark word.
      // CmpxchgX sets CCR0 to cmpX(current, monitor).
      cmpxchgd(/*flag=*/CCR0,
               /*current_value=*/current_header,
               /*compare_value=*/monitor, /*exchange_value=*/header,
               /*where=*/object_mark_addr,
               MacroAssembler::MemBarRel,
               MacroAssembler::cmpxchgx_hint_release_lock(),
               noreg,
               &slow_case);
    }
    b(free_slot);

    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorexit(monitor);

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case.
    bind(slow_case);
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
    // }

    Label done;
    b(done); // Monitor register may be overwritten! Runtime has already freed the slot.

    // Exchange worked, do monitor->set_obj(nullptr);
    align(32, 12);
    bind(free_slot);
    li(R0, 0);
    std(R0, in_bytes(BasicObjectLock::obj_offset()), monitor);
    dec_held_monitor_count(current_header /*tmp*/);
    bind(done);
  }
}

// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
//
// Input:
//   - Rtarget_method: method to call
//   - Rret_addr:      return address
//   - 2 scratch regs
//
void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, Register Rret_addr,
                                                      Register Rscratch1, Register Rscratch2) {
  assert_different_registers(Rscratch1, Rscratch2, Rtarget_method, Rret_addr);
  // Assume we want to go compiled if available.
  const Register Rtarget_addr = Rscratch1;
  const Register Rinterp_only = Rscratch2;

  ld(Rtarget_addr, in_bytes(Method::from_interpreted_offset()), Rtarget_method);

  if (JvmtiExport::can_post_interpreter_events()) {
    lwz(Rinterp_only, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);

    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    Label done;
    cmpwi(CCR0, Rinterp_only, 0);
    beq(CCR0, done);
    ld(Rtarget_addr, in_bytes(Method::interpreter_entry_offset()), Rtarget_method);
    align(32, 12);
    bind(done);
  }

#ifdef ASSERT
  {
    Label Lok;
    cmpdi(CCR0, Rtarget_addr, 0);
    bne(CCR0, Lok);
    stop("null entry point");
    bind(Lok);
  }
#endif // ASSERT

  mr(R21_sender_SP, R1_SP);

  // Calc a precise SP for the call. The SP value we calculated in
  // generate_fixed_frame() is based on the max_stack() value, so we would waste stack space
  // if esp is not max. Also, the i2c adapter extends the stack space without restoring
  // our pre-calced value, so repeating calls via i2c would result in stack overflow.
  // Since esp already points to an empty slot, we just have to sub 1 additional slot
  // to meet the abi scratch requirements.
  // The max_stack pointer will get restored by means of the GR_Lmax_stack local in
  // the return entry of the interpreter.
  addi(Rscratch2, R15_esp, Interpreter::stackElementSize - frame::top_ijava_frame_abi_size);
  clrrdi(Rscratch2, Rscratch2, exact_log2(frame::alignment_in_bytes)); // round towards smaller address
  resize_frame_absolute(Rscratch2, Rscratch2, R0);

  mr_if_needed(R19_method, Rtarget_method);
  mtctr(Rtarget_addr);
  mtlr(Rret_addr);

  save_interpreter_state(Rscratch2);
#ifdef ASSERT
  ld(Rscratch1, _ijava_state_neg(top_frame_sp), Rscratch2); // Rscratch2 contains fp
  sldi(Rscratch1, Rscratch1, Interpreter::logStackElementSize);
  add(Rscratch1, Rscratch1, Rscratch2); // Rscratch2 contains fp
  // Compare sender_sp with the derelativized top_frame_sp
  cmpd(CCR0, R21_sender_SP, Rscratch1);
  asm_assert_eq("top_frame_sp incorrect");
#endif

  bctr();
}

// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;
  ld(R28_mdx, in_bytes(Method::method_data_offset()), R19_method);
  test_method_data_pointer(get_continue);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), R19_method, R14_bcp);

  addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
  add(R28_mdx, R28_mdx, R3_RET);
  bind(get_continue);
}

// Test ImethodDataPtr. If it is null, continue at the specified label.
void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  cmpdi(CCR0, R28_mdx, 0);
  beq(CCR0, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
  lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
  ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
  addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
  add(R11_scratch1, R11_scratch1, R12_scratch2); // bcp = ConstMethod* + codes_offset + bci
  cmpd(CCR0, R11_scratch1, R14_bcp);
  beq(CCR0, verify_continue);

  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);

  bind(verify_continue);
#endif
}

// Store a value at some constant offset from the method data pointer.
void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  std(value, constant, R28_mdx);
}

// Increment the value at some constant offset from the method data pointer.
void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register counter_addr,
                                                      Register Rbumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  addi(counter_addr, R28_mdx, constant);
  increment_mdp_data_at(counter_addr, Rbumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.
void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register scratch,
                                                      Register Rbumped_count,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(scratch, R28_mdx, reg);
  // Then calculate the counter address.
  addi(scratch, scratch, constant);
  increment_mdp_data_at(scratch, Rbumped_count, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Register counter_addr,
                                                      Register Rbumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld(Rbumped_count, 0, counter_addr);

  if (decrement) {
    // Decrement the counter.
    addi(Rbumped_count, Rbumped_count, - DataLayout::counter_increment);
    // Store the decremented counter.
    std(Rbumped_count, 0, counter_addr);
    // Note: add/sub overflow checks are not ported, since a 64 bit
    // calculation should never overflow.
  } else {
    // Increment the counter.
    addi(Rbumped_count, Rbumped_count, DataLayout::counter_increment);
    // Store the incremented counter.
    std(Rbumped_count, 0, counter_addr);
  }
1348 }
1349 
1350 // Set a flag value at the current method data pointer position.
1351 void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
1352                                                 Register scratch) {
1353   assert(ProfileInterpreter, "must be profiling interpreter");
1354   // Load the data header.
1355   lbz(scratch, in_bytes(DataLayout::flags_offset()), R28_mdx);
1356   // Set the flag.
1357   ori(scratch, scratch, flag_constant);
1358   // Store the modified header.
1359   stb(scratch, in_bytes(DataLayout::flags_offset()), R28_mdx);
1360 }
1361 
1362 // Test the location at some offset from the method data pointer.
1363 // If it is not equal to value, branch to the not_equal_continue Label.
1364 void InterpreterMacroAssembler::test_mdp_data_at(int offset,
1365                                                  Register value,
1366                                                  Label& not_equal_continue,
1367                                                  Register test_out) {
1368   assert(ProfileInterpreter, "must be profiling interpreter");
1369 
1370   ld(test_out, offset, R28_mdx);
1371   cmpd(CCR0, value, test_out);
1372   bne(CCR0, not_equal_continue);
1373 }
1374 
1375 // Update the method data pointer by the displacement located at some fixed
1376 // offset from the method data pointer.
1377 void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
1378                                                      Register scratch) {
1379   assert(ProfileInterpreter, "must be profiling interpreter");
1380 
1381   ld(scratch, offset_of_disp, R28_mdx);
1382   add(R28_mdx, scratch, R28_mdx);
1383 }
1384 
1385 // Update the method data pointer by the displacement located at the
1386 // offset (reg + offset_of_disp).
1387 void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
1388                                                      int offset_of_disp,
1389                                                      Register scratch) {
1390   assert(ProfileInterpreter, "must be profiling interpreter");
1391 
1392   add(scratch, reg, R28_mdx);
1393   ld(scratch, offset_of_disp, scratch);
1394   add(R28_mdx, scratch, R28_mdx);
1395 }
1396 
1397 // Update the method data pointer by a simple constant displacement.
1398 void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
1399   assert(ProfileInterpreter, "must be profiling interpreter");
1400   addi(R28_mdx, R28_mdx, constant);
1401 }
1402 
1403 // Update the method data pointer for a _ret bytecode whose target
1404 // was not among our cached targets.
1405 void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
1406                                                    Register return_bci) {
1407   assert(ProfileInterpreter, "must be profiling interpreter");
1408 
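       // Keep the tos value live across the runtime call (push also exposes it to GC).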
1409   push(state);
1410   assert(return_bci->is_nonvolatile(), "need to protect return_bci");
1411   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
1412   pop(state);
1413 }
1414 
1415 // Increments the backedge counter.
1416 // Returns backedge counter + invocation counter in Rdst.
1417 void InterpreterMacroAssembler::increment_backedge_counter(const Register Rcounters, const Register Rdst,
1418                                                            const Register Rtmp1, Register Rscratch) {
1419   assert(UseCompiler, "incrementing must be useful");
1420   assert_different_registers(Rdst, Rtmp1);
1421   const Register invocation_counter = Rtmp1;
1422   const Register counter = Rdst;
1423   // TODO: PPC port: assert(4 == InvocationCounter::sz_counter(), "unexpected field size.");
1424 
1425   // Load backedge counter.
1426   lwz(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
1427                in_bytes(InvocationCounter::counter_offset()), Rcounters);
1428   // Load invocation counter.
1429   lwz(invocation_counter, in_bytes(MethodCounters::invocation_counter_offset()) +
1430                           in_bytes(InvocationCounter::counter_offset()), Rcounters);
1431 
1432   // Add the delta to the backedge counter.
1433   addi(counter, counter, InvocationCounter::count_increment);
1434 
1435   // Mask the invocation counter.
1436   andi(invocation_counter, invocation_counter, InvocationCounter::count_mask_value);
1437 
1438   // Store new counter value.
1439   stw(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
1440                in_bytes(InvocationCounter::counter_offset()), Rcounters);
1441   // Return invocation counter + backedge counter.
1442   add(counter, counter, invocation_counter);
1443 }
1444 
1445 // Count a taken branch in the bytecodes.
1446 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
1447   if (ProfileInterpreter) {
1448     Label profile_continue;
1449 
1450     // If no method data exists, go to profile_continue.
1451     test_method_data_pointer(profile_continue);
1452 
1453     // We are taking a branch. Increment the taken count.
1454     increment_mdp_data_at(in_bytes(JumpData::taken_offset()), scratch, bumped_count);
1455 
1456     // The method data pointer needs to be updated to reflect the new target.
1457     update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
1458     bind (profile_continue);
1459   }
1460 }
1461 
1462 // Count a not-taken branch in the bytecodes.
1463 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch1, Register scratch2) {
1464   if (ProfileInterpreter) {
1465     Label profile_continue;
1466 
1467     // If no method data exists, go to profile_continue.
1468     test_method_data_pointer(profile_continue);
1469 
1470     // We are not taking the branch. Increment the not-taken count.
1471     increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch1, scratch2);
1472 
1473     // The method data pointer needs to be updated to correspond to the
1474     // next bytecode.
1475     update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
1476     bind (profile_continue);
1477   }
1478 }
1479 
1480 // Count a non-virtual call in the bytecodes.
1481 void InterpreterMacroAssembler::profile_call(Register scratch1, Register scratch2) {
1482   if (ProfileInterpreter) {
1483     Label profile_continue;
1484 
1485     // If no method data exists, go to profile_continue.
1486     test_method_data_pointer(profile_continue);
1487 
1488     // We are making a call. Increment the count.
1489     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1490 
1491     // The method data pointer needs to be updated to reflect the new target.
1492     update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
1493     bind (profile_continue);
1494   }
1495 }
1496 
1497 // Count a final call in the bytecodes.
1498 void InterpreterMacroAssembler::profile_final_call(Register scratch1, Register scratch2) {
1499   if (ProfileInterpreter) {
1500     Label profile_continue;
1501 
1502     // If no method data exists, go to profile_continue.
1503     test_method_data_pointer(profile_continue);
1504 
1505     // We are making a call. Increment the count.
1506     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1507 
1508     // The method data pointer needs to be updated to reflect the new target.
1509     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1510     bind (profile_continue);
1511   }
1512 }
1513 
1514 // Count a virtual call in the bytecodes.
1515 void InterpreterMacroAssembler::profile_virtual_call(Register Rreceiver,
1516                                                      Register Rscratch1,
1517                                                      Register Rscratch2,
1518                                                      bool receiver_can_be_null) {
1519   if (!ProfileInterpreter) { return; }
1520   Label profile_continue;
1521 
1522   // If no method data exists, go to profile_continue.
1523   test_method_data_pointer(profile_continue);
1524 
1525   Label skip_receiver_profile;
1526   if (receiver_can_be_null) {
1527     Label not_null;
1528     cmpdi(CCR0, Rreceiver, 0);
1529     bne(CCR0, not_null);
1530     // We are making a call. Increment the count for null receiver.
1531     increment_mdp_data_at(in_bytes(CounterData::count_offset()), Rscratch1, Rscratch2);
1532     b(skip_receiver_profile);
1533     bind(not_null);
1534   }
1535 
1536   // Record the receiver type.
1537   record_klass_in_profile(Rreceiver, Rscratch1, Rscratch2, true);
1538   bind(skip_receiver_profile);
1539 
1540   // The method data pointer needs to be updated to reflect the new target.
1541   update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1542   bind (profile_continue);
1543 }
1544 
1545 void InterpreterMacroAssembler::profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2) {
1546   if (ProfileInterpreter) {
1547     Label profile_continue;
1548 
1549     // If no method data exists, go to profile_continue.
1550     test_method_data_pointer(profile_continue);
1551 
1552     int mdp_delta = in_bytes(BitData::bit_data_size());
1553     if (TypeProfileCasts) {
1554       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1555 
1556       // Record the object type.
1557       record_klass_in_profile(Rklass, Rscratch1, Rscratch2, false);
1558     }
1559 
1560     // The method data pointer needs to be updated.
1561     update_mdp_by_constant(mdp_delta);
1562 
1563     bind (profile_continue);
1564   }
1565 }
1566 
1567 void InterpreterMacroAssembler::profile_typecheck_failed(Register Rscratch1, Register Rscratch2) {
1568   if (ProfileInterpreter && TypeProfileCasts) {
1569     Label profile_continue;
1570 
1571     // If no method data exists, go to profile_continue.
1572     test_method_data_pointer(profile_continue);
1573 
1574     int count_offset = in_bytes(CounterData::count_offset());
1575     // Back up the address, since we have already bumped the mdp.
1576     count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
1577 
1578     // *Decrement* the counter. We expect to see zero or small negatives.
1579     increment_mdp_data_at(count_offset, Rscratch1, Rscratch2, true);
1580 
1581     bind (profile_continue);
1582   }
1583 }
1584 
1585 // Count a ret in the bytecodes.
1586 void InterpreterMacroAssembler::profile_ret(TosState state, Register return_bci,
1587                                             Register scratch1, Register scratch2) {
1588   if (ProfileInterpreter) {
1589     Label profile_continue;
1590     uint row;
1591 
1592     // If no method data exists, go to profile_continue.
1593     test_method_data_pointer(profile_continue);
1594 
1595     // Update the total ret count.
1596     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1597 
1598     for (row = 0; row < RetData::row_limit(); row++) {
1599       Label next_test;
1600 
1601       // See if return_bci is equal to bci[n]:
1602       test_mdp_data_at(in_bytes(RetData::bci_offset(row)), return_bci, next_test, scratch1);
1603 
1604       // return_bci is equal to bci[n]. Increment the count.
1605       increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch1, scratch2);
1606 
1607       // The method data pointer needs to be updated to reflect the new target.
1608       update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch1);
1609       b(profile_continue);
1610       bind(next_test);
1611     }
1612 
1613     update_mdp_for_ret(state, return_bci);
1614 
1615     bind (profile_continue);
1616   }
1617 }
1618 
1619 // Count the default case of a switch construct.
1620 void InterpreterMacroAssembler::profile_switch_default(Register scratch1, Register scratch2) {
1621   if (ProfileInterpreter) {
1622     Label profile_continue;
1623 
1624     // If no method data exists, go to profile_continue.
1625     test_method_data_pointer(profile_continue);
1626 
1627     // Update the default case count
1628     increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
1629                           scratch1, scratch2);
1630 
1631     // The method data pointer needs to be updated.
1632     update_mdp_by_offset(in_bytes(MultiBranchData::default_displacement_offset()),
1633                          scratch1);
1634 
1635     bind (profile_continue);
1636   }
1637 }
1638 
1639 // Count the index'th case of a switch construct.
1640 void InterpreterMacroAssembler::profile_switch_case(Register index,
1641                                                     Register scratch1,
1642                                                     Register scratch2,
1643                                                     Register scratch3) {
1644   if (ProfileInterpreter) {
1645     assert_different_registers(index, scratch1, scratch2, scratch3);
1646     Label profile_continue;
1647 
1648     // If no method data exists, go to profile_continue.
1649     test_method_data_pointer(profile_continue);
1650 
1651     // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes().
1652     li(scratch3, in_bytes(MultiBranchData::case_array_offset()));
1653 
1654     assert(in_bytes(MultiBranchData::per_case_size()) == 16, "must be a power of 2 so the sldi below can scale the index");
1655     sldi(scratch1, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));
1656     add(scratch1, scratch1, scratch3);
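         // scratch1 = case_array_offset + index * per_case_size
         // (the offset of this case's cells relative to the mdp).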
1657 
1658     // Update the case count.
1659     increment_mdp_data_at(scratch1, in_bytes(MultiBranchData::relative_count_offset()), scratch2, scratch3);
1660 
1661     // The method data pointer needs to be updated.
1662     update_mdp_by_offset(scratch1, in_bytes(MultiBranchData::relative_displacement_offset()), scratch2);
1663 
1664     bind (profile_continue);
1665   }
1666 }
1667 
1668 void InterpreterMacroAssembler::profile_null_seen(Register Rscratch1, Register Rscratch2) {
1669   if (ProfileInterpreter) {
1670     assert_different_registers(Rscratch1, Rscratch2);
1671     Label profile_continue;
1672 
1673     // If no method data exists, go to profile_continue.
1674     test_method_data_pointer(profile_continue);
1675 
1676     set_mdp_flag_at(BitData::null_seen_byte_constant(), Rscratch1);
1677 
1678     // The method data pointer needs to be updated.
1679     int mdp_delta = in_bytes(BitData::bit_data_size());
1680     if (TypeProfileCasts) {
1681       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1682     }
1683     update_mdp_by_constant(mdp_delta);
1684 
1685     bind (profile_continue);
1686   }
1687 }
1688 
1689 void InterpreterMacroAssembler::record_klass_in_profile(Register Rreceiver,
1690                                                         Register Rscratch1, Register Rscratch2,
1691                                                         bool is_virtual_call) {
1692   assert(ProfileInterpreter, "must be profiling");
1693   assert_different_registers(Rreceiver, Rscratch1, Rscratch2);
1694 
1695   Label done;
1696   record_klass_in_profile_helper(Rreceiver, Rscratch1, Rscratch2, 0, done, is_virtual_call);
1697   bind (done);
1698 }
1699 
1700 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1701                                         Register receiver, Register scratch1, Register scratch2,
1702                                         int start_row, Label& done, bool is_virtual_call) {
1703   if (TypeProfileWidth == 0) {
1704     if (is_virtual_call) {
1705       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1706     }
1707     return;
1708   }
1709 
1710   int last_row = VirtualCallData::row_limit() - 1;
1711   assert(start_row <= last_row, "must be work left to do");
1712   // Test this row for both the receiver and for null.
1713   // Take any of three different outcomes:
1714   //   1. found receiver => increment count and goto done
1715   //   2. found null => keep looking for case 1, maybe allocate this cell
1716   //   3. found something else => keep looking for cases 1 and 2
1717   // Case 3 is handled by a recursive call.
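       // Roughly, the generated code behaves like:
       //   if (receiver_cell[start_row] == receiver) { count_cell[start_row] += increment; goto done; }
       //   if (receiver_cell[start_row] != nullptr)  { /* case 3 */ recurse over rows start_row+1..last_row; }
       //   else { // null: keep scanning later rows for a match,
       //     for (row = start_row+1; row <= last_row; row++)
       //       if (receiver_cell[row] == receiver) { count_cell[row] += increment; goto done; }
       //     receiver_cell[start_row] = receiver; count_cell[start_row] = increment; // claim the empty slot
       //   }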
1718   for (int row = start_row; row <= last_row; row++) {
1719     Label next_test;
1720     bool test_for_null_also = (row == start_row);
1721 
1722     // See if the receiver is receiver[n].
1723     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1724     test_mdp_data_at(recvr_offset, receiver, next_test, scratch1);
1726 
1727     // The receiver is receiver[n]. Increment count[n].
1728     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1729     increment_mdp_data_at(count_offset, scratch1, scratch2);
1730     b(done);
1731     bind(next_test);
1732 
1733     if (test_for_null_also) {
1734       Label found_null;
1735       // Failed the equality check on receiver[n]... Test for null.
1736       if (start_row == last_row) {
1737         // The only thing left to do is handle the null case.
1738         if (is_virtual_call) {
1739           // Scratch1 contains test_out from test_mdp_data_at.
1740           cmpdi(CCR0, scratch1, 0);
1741           beq(CCR0, found_null);
1742           // Receiver did not match any saved receiver and there is no empty row for it.
1743           // Increment total counter to indicate polymorphic case.
1744           increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1745           b(done);
1746           bind(found_null);
1747         } else {
1748           cmpdi(CCR0, scratch1, 0);
1749           bne(CCR0, done);
1750         }
1751         break;
1752       }
1753       // Since null is rare, make it be the branch-taken case.
1754       cmpdi(CCR0, scratch1, 0);
1755       beq(CCR0, found_null);
1756 
1757       // Put all the "Case 3" tests here.
1758       record_klass_in_profile_helper(receiver, scratch1, scratch2, start_row + 1, done, is_virtual_call);
1759 
1760       // Found a null. Keep searching for a matching receiver,
1761       // but remember that this is an empty (unused) slot.
1762       bind(found_null);
1763     }
1764   }
1765 
1766   // In the fall-through case, we found no matching receiver, but we
1767   // observed that receiver[start_row] is null.
1768 
1769   // Fill in the receiver field and increment the count.
1770   int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1771   set_mdp_data_at(recvr_offset, receiver);
1772   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1773   li(scratch1, DataLayout::counter_increment);
1774   set_mdp_data_at(count_offset, scratch1);
1775   if (start_row > 0) {
1776     b(done);
1777   }
1778 }
1779 
1780 // Argument and return type profiling.
1781 // kills: tmp, tmp2, R0, CR0, CR1
1782 void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
1783                                                  RegisterOrConstant mdo_addr_offs,
1784                                                  Register tmp, Register tmp2) {
1785   Label do_nothing, do_update;
1786 
1787   // tmp2 = obj is allowed
1788   assert_different_registers(obj, mdo_addr_base, tmp, R0);
1789   assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
1790   const Register klass = tmp2;
1791 
1792   verify_oop(obj);
1793 
1794   ld(tmp, mdo_addr_offs, mdo_addr_base);
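       // tmp now holds the profile cell: a Klass* with the TypeEntries
       // null_seen / type_unknown flags encoded in its low bits.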
1795 
1796   // Set null_seen if obj is 0.
1797   cmpdi(CCR0, obj, 0);
1798   ori(R0, tmp, TypeEntries::null_seen);
1799   beq(CCR0, do_update);
1800 
1801   load_klass(klass, obj);
1802 
1803   clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
1804   // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
1805   cmpd(CCR1, R0, klass);
1806   // Klass seen before, nothing to do (regardless of unknown bit).
1807   //beq(CCR1, do_nothing);
1808 
1809   andi_(R0, klass, TypeEntries::type_unknown);
1810   // Already unknown. Nothing to do anymore.
1811   //bne(CCR0, do_nothing);
1812   crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
1813   beq(CCR0, do_nothing);
1814 
1815   clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
1816   orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
1817   beq(CCR0, do_update); // First time here. Set profile type.
1818 
1819   // Different than before. Cannot keep accurate profile.
1820   ori(R0, tmp, TypeEntries::type_unknown);
1821 
1822   bind(do_update);
1823   // update profile
1824   std(R0, mdo_addr_offs, mdo_addr_base);
1825 
1826   align(32, 12);
1827   bind(do_nothing);
1828 }
1829 
1830 void InterpreterMacroAssembler::profile_arguments_type(Register callee,
1831                                                        Register tmp1, Register tmp2,
1832                                                        bool is_virtual) {
1833   if (!ProfileInterpreter) {
1834     return;
1835   }
1836 
1837   assert_different_registers(callee, tmp1, tmp2, R28_mdx);
1838 
1839   if (MethodData::profile_arguments() || MethodData::profile_return()) {
1840     Label profile_continue;
1841 
1842     test_method_data_pointer(profile_continue);
1843 
1844     int off_to_start = is_virtual ?
1845       in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
1846 
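         // R28_mdx points right after the call's ProfileData record; read the
         // DataLayout tag at the record's start and bail out unless it is a
         // call_type / virtual_call_type record.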
1847     lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
1848     cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
1849     bne(CCR0, profile_continue);
1850 
1851     if (MethodData::profile_arguments()) {
1852       Label done;
1853       int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
1854       addi(R28_mdx, R28_mdx, off_to_args);
1855 
1856       for (int i = 0; i < TypeProfileArgsLimit; i++) {
1857         if (i > 0 || MethodData::profile_return()) {
1858           // If return value type is profiled we may have no argument to profile.
1859           ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
1860           cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
1861           addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
1862           blt(CCR0, done);
1863         }
1864         ld(tmp1, in_bytes(Method::const_offset()), callee);
1865         lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
1866         // Stack offset o (zero based) from the start of the argument
1867         // list, for n arguments translates into offset n - o - 1 from
1868         // the end of the argument list. But there's an extra slot at
1869         // the top of the stack. So the offset is n - o from Lesp.
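             // Example: with n == 3 parameters, the first argument (o == 0)
             // is read at Lesp + 3 slots.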
1870         ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
1871         subf(tmp1, tmp2, tmp1);
1872 
1873         sldi(tmp1, tmp1, Interpreter::logStackElementSize);
1874         ldx(tmp1, tmp1, R15_esp);
1875 
1876         profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);
1877 
1878         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1879         addi(R28_mdx, R28_mdx, to_add);
1880         off_to_args += to_add;
1881       }
1882 
1883       if (MethodData::profile_return()) {
1884         ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
1885         addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1886       }
1887 
1888       bind(done);
1889 
1890       if (MethodData::profile_return()) {
1891         // We're right after the type profile for the last
1892         // argument. tmp1 is the number of cells left in the
1893         // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
1894         // if there's a return to profile.
1895         assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(),
1896                "can't move past ret type");
1897         sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
1898         add(R28_mdx, tmp1, R28_mdx);
1899       }
1900     } else {
1901       assert(MethodData::profile_return(), "either profile call args or call ret");
1902       update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
1903     }
1904 
1905     // Mdp points right after the end of the
1906     // CallTypeData/VirtualCallTypeData, right after the cells for the
1907     // return value type if there's one.
1908     align(32, 12);
1909     bind(profile_continue);
1910   }
1911 }
1912 
1913 void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
1914   assert_different_registers(ret, tmp1, tmp2);
1915   if (ProfileInterpreter && MethodData::profile_return()) {
1916     Label profile_continue;
1917 
1918     test_method_data_pointer(profile_continue);
1919 
1920     if (MethodData::profile_return_jsr292_only()) {
1921       // If we don't profile all invoke bytecodes we must make sure
1922       // it's a bytecode we indeed profile. We can't go back to the
1923       // beginning of the ProfileData we intend to update to check its
1924       // type because we're right after it and we don't know its
1925       // length.
1926       lbz(tmp1, 0, R14_bcp);
1927       lbz(tmp2, in_bytes(Method::intrinsic_id_offset()), R19_method);
1928       cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
1929       cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
1930       cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
1931       cmpwi(CCR1, tmp2, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1932       cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
1933       bne(CCR0, profile_continue);
1934     }
1935 
1936     profile_obj_type(ret, R28_mdx, -in_bytes(SingleTypeEntry::size()), tmp1, tmp2);
1937 
1938     align(32, 12);
1939     bind(profile_continue);
1940   }
1941 }
1942 
1943 void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2,
1944                                                         Register tmp3, Register tmp4) {
1945   if (ProfileInterpreter && MethodData::profile_parameters()) {
1946     Label profile_continue, done;
1947 
1948     test_method_data_pointer(profile_continue);
1949 
1950     // Load the offset of the area within the MDO used for
1951     // parameters. If it's negative we're not profiling any parameters.
1952     lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
1953     cmpwi(CCR0, tmp1, 0);
1954     blt(CCR0, profile_continue);
1955 
1956     // Compute a pointer to the area for parameters from the offset
1957     // and move the pointer to the slot for the last
1958     // parameters. Collect profiling from last parameter down.
1959     // mdo start + parameters offset + array length - 1
1960 
1961     // Pointer to the parameter area in the MDO.
1962     const Register mdp = tmp1;
1963     add(mdp, tmp1, R28_mdx);
1964 
1965     // Offset of the current profile entry to update.
1966     const Register entry_offset = tmp2;
1967     // entry_offset = array len in number of cells
1968     ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);
1969 
1970     int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
1971     assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");
1972 
1973     // entry_offset (number of cells)  = array len - size of 1 entry + offset of the stack slot field
1974     addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
1975     // entry_offset in bytes
1976     sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
1977 
1978     Label loop;
1979     align(32, 12);
1980     bind(loop);
1981 
1982     // Load offset on the stack from the slot for this parameter.
1983     ld(tmp3, entry_offset, mdp);
1984     sldi(tmp3, tmp3, Interpreter::logStackElementSize);
1985     neg(tmp3, tmp3);
1986     // Read the parameter from the local area.
1987     ldx(tmp3, tmp3, R18_locals);
1988 
1989     // Make entry_offset now point to the type field for this parameter.
1990     int type_base = in_bytes(ParametersTypeData::type_offset(0));
1991     assert(type_base > off_base, "unexpected");
1992     addi(entry_offset, entry_offset, type_base - off_base);
1993 
1994     // Profile the parameter.
1995     profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);
1996 
1997     // Go to next parameter.
1998     int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
1999     cmpdi(CCR0, entry_offset, off_base + delta);
2000     addi(entry_offset, entry_offset, -delta);
2001     bge(CCR0, loop);
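         // The compare above uses the pre-decrement entry_offset, so the loop
         // continues while the decremented offset still reaches the first entry
         // (off_base).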
2002 
2003     align(32, 12);
2004     bind(profile_continue);
2005   }
2006 }
2007 
2008 // Add an InterpMonitorElem to the stack (see frame_ppc.hpp).
2009 void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {
2010 
2011   // Very-local scratch registers.
2012   const Register esp  = Rtemp1;
2013   const Register slot = Rtemp2;
2014 
2015   // Extracted monitor_size.
2016   int monitor_size = frame::interpreter_frame_monitor_size_in_bytes();
2017   assert(Assembler::is_aligned((unsigned int)monitor_size,
2018                                (unsigned int)frame::alignment_in_bytes),
2019          "size of a monitor must respect alignment of SP");
2020 
2021   resize_frame(-monitor_size, /*temp*/esp); // Allocate space for new monitor
2022   subf(Rtemp2, esp, R1_SP); // esp contains fp
2023   sradi(Rtemp2, Rtemp2, Interpreter::logStackElementSize);
2024   // Store relativized top_frame_sp
2025   std(Rtemp2, _ijava_state_neg(top_frame_sp), esp); // esp contains fp
2026 
2027   // Shuffle the expression stack down: each slot is copied monitor_size
2028   // bytes lower, which frees a monitor-sized region between the existing
2029   // monitors and the expression stack.
2030   if (!stack_is_empty) {
2031     Label copy_slot, copy_slot_finished;
2032     const Register n_slots = slot;
2033 
2034     addi(esp, R15_esp, Interpreter::stackElementSize); // Point to first element (pre-pushed stack).
2035     subf(n_slots, esp, R26_monitor);
2036     srdi_(n_slots, n_slots, LogBytesPerWord);          // Compute number of slots to copy.
2037     assert(LogBytesPerWord == 3, "the srdi_ above assumes 8-byte words");
2038     beq(CCR0, copy_slot_finished);                     // Nothing to copy.
2039 
2040     mtctr(n_slots);
2041 
2042     // loop
2043     bind(copy_slot);
2044     ld(slot, 0, esp);              // Move expression stack down.
2045     std(slot, -monitor_size, esp); // distance = monitor_size
2046     addi(esp, esp, BytesPerWord);
2047     bdnz(copy_slot);
2048 
2049     bind(copy_slot_finished);
2050   }
2051 
2052   addi(R15_esp, R15_esp, -monitor_size);
2053   addi(R26_monitor, R26_monitor, -monitor_size);
2054 
2055   // Restart interpreter
2056 }
2057 
2058 // ============================================================================
2059 // Java locals access
2060 
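     // Locals live below R18_locals at decreasing addresses: the address of slot i
     // is R18_locals - i * stackElementSize (hence the sldi/subf pattern below);
     // longs and doubles occupy two slots and are accessed at offset -8.
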
2061 // Load a local variable at index in Rindex into register Rdst_value.
2062 // Also puts address of local into Rdst_address as a service.
2063 // Kills:
2064 //   - Rdst_value
2065 //   - Rdst_address
2066 void InterpreterMacroAssembler::load_local_int(Register Rdst_value, Register Rdst_address, Register Rindex) {
2067   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2068   subf(Rdst_address, Rdst_address, R18_locals);
2069   lwz(Rdst_value, 0, Rdst_address);
2070 }
2071 
2072 // Load a local variable at index in Rindex into register Rdst_value.
2073 // Also puts address of local into Rdst_address as a service.
2074 // Kills:
2075 //   - Rdst_value
2076 //   - Rdst_address
2077 void InterpreterMacroAssembler::load_local_long(Register Rdst_value, Register Rdst_address, Register Rindex) {
2078   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2079   subf(Rdst_address, Rdst_address, R18_locals);
2080   ld(Rdst_value, -8, Rdst_address);
2081 }
2082 
2083 // Load a local variable at index in Rindex into register Rdst_value.
2084 // Also puts address of local into Rdst_address as a service.
2085 // Input:
2086 //   - Rindex:      slot nr of local variable
2087 // Kills:
2088 //   - Rdst_value
2089 //   - Rdst_address
2090 void InterpreterMacroAssembler::load_local_ptr(Register Rdst_value,
2091                                                Register Rdst_address,
2092                                                Register Rindex) {
2093   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2094   subf(Rdst_address, Rdst_address, R18_locals);
2095   ld(Rdst_value, 0, Rdst_address);
2096 }
2097 
2098 // Load a local variable at index in Rindex into register Rdst_value.
2099 // Also puts address of local into Rdst_address as a service.
2100 // Kills:
2101 //   - Rdst_value
2102 //   - Rdst_address
2103 void InterpreterMacroAssembler::load_local_float(FloatRegister Rdst_value,
2104                                                  Register Rdst_address,
2105                                                  Register Rindex) {
2106   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2107   subf(Rdst_address, Rdst_address, R18_locals);
2108   lfs(Rdst_value, 0, Rdst_address);
2109 }
2110 
2111 // Load a local variable at index in Rindex into register Rdst_value.
2112 // Also puts address of local into Rdst_address as a service.
2113 // Kills:
2114 //   - Rdst_value
2115 //   - Rdst_address
2116 void InterpreterMacroAssembler::load_local_double(FloatRegister Rdst_value,
2117                                                   Register Rdst_address,
2118                                                   Register Rindex) {
2119   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2120   subf(Rdst_address, Rdst_address, R18_locals);
2121   lfd(Rdst_value, -8, Rdst_address);
2122 }
2123 
2124 // Store an int value at local variable slot Rindex.
2125 // Kills:
2126 //   - Rindex
2127 void InterpreterMacroAssembler::store_local_int(Register Rvalue, Register Rindex) {
2128   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2129   subf(Rindex, Rindex, R18_locals);
2130   stw(Rvalue, 0, Rindex);
2131 }
2132 
2133 // Store a long value at local variable slot Rindex.
2134 // Kills:
2135 //   - Rindex
2136 void InterpreterMacroAssembler::store_local_long(Register Rvalue, Register Rindex) {
2137   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2138   subf(Rindex, Rindex, R18_locals);
2139   std(Rvalue, -8, Rindex);
2140 }
2141 
2142 // Store an oop value at local variable slot Rindex.
2143 // Kills:
2144 //   - Rindex
2145 void InterpreterMacroAssembler::store_local_ptr(Register Rvalue, Register Rindex) {
2146   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2147   subf(Rindex, Rindex, R18_locals);
2148   std(Rvalue, 0, Rindex);
2149 }
2150 
2151 // Store a float value at local variable slot Rindex.
2152 // Kills:
2153 //   - Rindex
2154 void InterpreterMacroAssembler::store_local_float(FloatRegister Rvalue, Register Rindex) {
2155   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2156   subf(Rindex, Rindex, R18_locals);
2157   stfs(Rvalue, 0, Rindex);
2158 }
2159 
2160 // Store a double value at local variable slot Rindex.
2161 // Kills:
2162 //   - Rindex
2163 void InterpreterMacroAssembler::store_local_double(FloatRegister Rvalue, Register Rindex) {
2164   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2165   subf(Rindex, Rindex, R18_locals);
2166   stfd(Rvalue, -8, Rindex);
2167 }
2168 
2169 // Read the pending exception from the thread. If one is pending, jump to the
2170 // interpreter's rethrow-exception entry; fall through otherwise.
2171 void InterpreterMacroAssembler::check_and_forward_exception(Register Rscratch1, Register Rscratch2) {
2172   assert_different_registers(Rscratch1, Rscratch2, R3);
2173   Register Rexception = Rscratch1;
2174   Register Rtmp       = Rscratch2;
2175   Label Ldone;
2176   // Get pending exception oop.
2177   ld(Rexception, thread_(pending_exception));
2178   cmpdi(CCR0, Rexception, 0);
2179   beq(CCR0, Ldone);
2180   li(Rtmp, 0);
2181   mr_if_needed(R3, Rexception);
2182   std(Rtmp, thread_(pending_exception)); // Clear exception in thread
2183   if (Interpreter::rethrow_exception_entry() != nullptr) {
2184     // Already got entry address.
2185     load_dispatch_table(Rtmp, (address*)Interpreter::rethrow_exception_entry());
2186   } else {
2187     // Dynamically load entry address.
2188     int simm16_rest = load_const_optimized(Rtmp, &Interpreter::_rethrow_exception_entry, R0, true);
2189     ld(Rtmp, simm16_rest, Rtmp);
2190   }
2191   mtctr(Rtmp);
2192   save_interpreter_state(Rtmp);
2193   bctr();
2194 
2195   align(32, 12);
2196   bind(Ldone);
2197 }
2198 
2199 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2200   save_interpreter_state(R11_scratch1);
2201 
2202   MacroAssembler::call_VM(oop_result, entry_point, false);
2203 
2204   restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
2205 
2206   check_and_handle_popframe(R11_scratch1);
2207   check_and_handle_earlyret(R11_scratch1);
2208   // Now check exceptions manually.
2209   if (check_exceptions) {
2210     check_and_forward_exception(R11_scratch1, R12_scratch2);
2211   }
2212 }
2213 
2214 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point,
2215                                         Register arg_1, bool check_exceptions) {
2216   // ARG1 is reserved for the thread.
2217   mr_if_needed(R4_ARG2, arg_1);
2218   call_VM(oop_result, entry_point, check_exceptions);
2219 }
2220 
2221 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point,
2222                                         Register arg_1, Register arg_2,
2223                                         bool check_exceptions) {
2224   // ARG1 is reserved for the thread.
2225   mr_if_needed(R4_ARG2, arg_1);
2226   assert(arg_2 != R4_ARG2, "smashed argument");
2227   mr_if_needed(R5_ARG3, arg_2);
2228   call_VM(oop_result, entry_point, check_exceptions);
2229 }
2230 
2231 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point,
2232                                         Register arg_1, Register arg_2, Register arg_3,
2233                                         bool check_exceptions) {
2234   // ARG1 is reserved for the thread.
2235   mr_if_needed(R4_ARG2, arg_1);
2236   assert(arg_2 != R4_ARG2, "smashed argument");
2237   mr_if_needed(R5_ARG3, arg_2);
2238   assert(arg_3 != R4_ARG2 && arg_3 != R5_ARG3, "smashed argument");
2239   mr_if_needed(R6_ARG4, arg_3);
2240   call_VM(oop_result, entry_point, check_exceptions);
2241 }
2242 
2243 void InterpreterMacroAssembler::save_interpreter_state(Register scratch) {
2244   ld(scratch, 0, R1_SP);
2245   std(R15_esp, _ijava_state_neg(esp), scratch);
2246   std(R14_bcp, _ijava_state_neg(bcp), scratch);
2247   std(R26_monitor, _ijava_state_neg(monitors), scratch);
2248   if (ProfileInterpreter) { std(R28_mdx, _ijava_state_neg(mdx), scratch); }
2249   // Other entries should be unchanged.
2250 }
2251 
2252 void InterpreterMacroAssembler::restore_interpreter_state(Register scratch, bool bcp_and_mdx_only, bool restore_top_frame_sp) {
2253   ld_ptr(scratch, _abi0(callers_sp), R1_SP);   // Load frame pointer.
2254   if (restore_top_frame_sp) {
2255     // After thawing the top frame of a continuation we reach here with a frame::java_abi;
2256     // therefore we have to restore top_frame_sp before the assertion below.
2257     assert(!bcp_and_mdx_only, "chose other registers");
2258     Register tfsp = R18_locals;
2259     Register scratch2 = R26_monitor;
2260     ld(tfsp, _ijava_state_neg(top_frame_sp), scratch);
2261     // Derelativize top_frame_sp
2262     sldi(tfsp, tfsp, Interpreter::logStackElementSize);
2263     add(tfsp, tfsp, scratch);
2264     resize_frame_absolute(tfsp, scratch2, R0);
2265   }
2266   ld(R14_bcp, _ijava_state_neg(bcp), scratch); // Changed by VM code (exception).
2267   if (ProfileInterpreter) { ld(R28_mdx, _ijava_state_neg(mdx), scratch); } // Changed by VM code.
2268   if (!bcp_and_mdx_only) {
2269     // Following ones are Metadata.
2270     ld(R19_method, _ijava_state_neg(method), scratch);
2271     ld(R27_constPoolCache, _ijava_state_neg(cpoolCache), scratch);
2272     // Following ones are stack addresses (locals is stored relativized and is converted back here).
2273     ld(R15_esp, _ijava_state_neg(esp), scratch);
2274     ld(R18_locals, _ijava_state_neg(locals), scratch);
2275     sldi(R18_locals, R18_locals, Interpreter::logStackElementSize);
2276     add(R18_locals, R18_locals, scratch);
2277     ld(R26_monitor, _ijava_state_neg(monitors), scratch);
2278   }
2279 #ifdef ASSERT
2280   {
2281     Label Lok;
2282     subf(R0, R1_SP, scratch);
2283     cmpdi(CCR0, R0, frame::top_ijava_frame_abi_size + frame::ijava_state_size);
2284     bge(CCR0, Lok);
2285     stop("frame too small (restore istate)");
2286     bind(Lok);
2287   }
2288 #endif
2289 }
2290 
2291 void InterpreterMacroAssembler::get_method_counters(Register method,
2292                                                     Register Rcounters,
2293                                                     Label& skip) {
2294   BLOCK_COMMENT("Load and ev. allocate counter object {");
2295   Label has_counters;
2296   ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
2297   cmpdi(CCR0, Rcounters, 0);
2298   bne(CCR0, has_counters);
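       // No MethodCounters yet: ask the runtime to allocate them, then reload.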
2299   call_VM(noreg, CAST_FROM_FN_PTR(address,
2300                                   InterpreterRuntime::build_method_counters), method);
2301   ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
2302   cmpdi(CCR0, Rcounters, 0);
2303   beq(CCR0, skip); // No MethodCounters, OutOfMemory.
2304   BLOCK_COMMENT("} Load and ev. allocate counter object");
2305 
2306   bind(has_counters);
2307 }
2308 
2309 void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters,
2310                                                              Register iv_be_count,
2311                                                              Register Rtmp_r0) {
2312   assert(UseCompiler, "incrementing must be useful");
2313   Register invocation_count = iv_be_count;
2314   Register backedge_count   = Rtmp_r0;
2315   int delta = InvocationCounter::count_increment;
2316 
2317   // Offsets of the two counters within the MethodCounters object.
2320   int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() +
2321                                     InvocationCounter::counter_offset());
2322   int be_counter_offset  = in_bytes(MethodCounters::backedge_counter_offset() +
2323                                     InvocationCounter::counter_offset());
2324 
2325   BLOCK_COMMENT("Increment profiling counters {");
2326 
2327   // Load the backedge counter.
2328   lwz(backedge_count, be_counter_offset, Rcounters); // is unsigned int
2329   // Mask the backedge counter.
2330   andi(backedge_count, backedge_count, InvocationCounter::count_mask_value);
2331 
2332   // Load the invocation counter.
2333   lwz(invocation_count, inv_counter_offset, Rcounters); // is unsigned int
2334   // Add the delta to the invocation counter and store the result.
2335   addi(invocation_count, invocation_count, delta);
2336   // Store value.
2337   stw(invocation_count, inv_counter_offset, Rcounters);
2338 
2339   // Add invocation counter + backedge counter.
2340   add(iv_be_count, backedge_count, invocation_count);
2341 
2342   // Note that this macro must leave the backedge_count + invocation_count in
2343   // register iv_be_count!
2344   BLOCK_COMMENT("} Increment profiling counters");
2345 }
2346 
2347 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
2348   if (state == atos) { MacroAssembler::verify_oop(reg, FILE_AND_LINE); }
2349 }
2350 
2351 // Local helper function for the verify_oop_or_return_address macro.
2352 static bool verify_return_address(Method* m, int bci) {
2353 #ifndef PRODUCT
2354   address pc = (address)(m->constMethod()) + in_bytes(ConstMethod::codes_offset()) + bci;
2355   // Assume it is a valid return address if it is inside m and is preceded by a jsr.
2356   if (!m->contains(pc))                                            return false;
2357   address jsr_pc;
2358   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2359   if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())    return true;
2360   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2361   if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())    return true;
2362 #endif // PRODUCT
2363   return false;
2364 }
2365 
2366 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2367   if (VerifyFPU) {
2368     unimplemented("verifyFPU");
2369   }
2370 }
2371 
2372 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2373   if (!VerifyOops) return;
2374 
2375   // The VM documentation for the astore[_wide] bytecode allows
2376   // the TOS to be not only an oop but also a return address.
2377   Label test;
2378   Label skip;
2379   // See if it is an address (in the current method):
2380 
2381   const int log2_bytecode_size_limit = 16;
2382   srdi_(Rtmp, reg, log2_bytecode_size_limit);
2383   bne(CCR0, test);
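       // reg fits into 16 bits, so it may be a bytecode-relative return address;
       // check it out of line via verify_return_address.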
2384 
2385   address fd = CAST_FROM_FN_PTR(address, verify_return_address);
2386   const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
2387   save_volatile_gprs(R1_SP, -nbytes_save); // except R0
2388   save_LR_CR(Rtmp); // Save in old frame.
2389   push_frame_reg_args(nbytes_save, Rtmp);
2390 
2391   load_const_optimized(Rtmp, fd, R0);
2392   mr_if_needed(R4_ARG2, reg);
2393   mr(R3_ARG1, R19_method);
2394   call_c(Rtmp); // call C
2395 
2396   pop_frame();
2397   restore_LR_CR(Rtmp);
2398   restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
2399   b(skip);
2400 
2401   // Not an address; verify it as an oop (verify_oop itself expands into a
2402   // more elaborate out-of-line call):
2403   bind(test);
2404   verify_oop(reg);
2405   bind(skip);
2406 }
2407 
2408 // Inline assembly for:
2409 //
2410 // if (thread is in interp_only_mode) {
2411 //   InterpreterRuntime::post_method_entry();
2412 // }
2417 void InterpreterMacroAssembler::notify_method_entry() {
2418   // JVMTI
2419   // Whenever JVMTI puts a thread in interp_only_mode, method
2420   // entry/exit events are sent for that thread to track stack
2421   // depth. If it is possible to enter interp_only_mode we add
2422   // the code to check if the event should be sent.
2423   if (JvmtiExport::can_post_interpreter_events()) {
2424     Label jvmti_post_done;
2425 
2426     lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
2427     cmpwi(CCR0, R0, 0);
2428     beq(CCR0, jvmti_post_done);
2429     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2430 
2431     bind(jvmti_post_done);
2432   }
2433 }
2434 
2435 // Inline assembly for:
2436 //
2437 // if (thread is in interp_only_mode) {
2438 //   // save result
2439 //   InterpreterRuntime::post_method_exit();
2440 //   // restore result
2441 // }
2447 //
2448 // Native methods have their result stored in d_tmp and l_tmp.
2449 // Java methods have their result stored in the expression stack.
2450 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosState state,
2451                                                    NotifyMethodExitMode mode, bool check_exceptions) {
2452   // JVMTI
2453   // Whenever JVMTI puts a thread in interp_only_mode, method
2454   // entry/exit events are sent for that thread to track stack
2455   // depth. If it is possible to enter interp_only_mode we add
2456   // the code to check if the event should be sent.
2457   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2458     Label jvmti_post_done;
2459 
2460     lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
2461     cmpwi(CCR0, R0, 0);
2462     beq(CCR0, jvmti_post_done);
2463     if (!is_native_method) { push(state); } // Expose tos to GC.
2464     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit), check_exceptions);
2465     if (!is_native_method) { pop(state); }
2466 
2467     align(32, 12);
2468     bind(jvmti_post_done);
2469   }
2470 
2471   // Dtrace support not implemented.
2472 }