1 /*
   2  * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_ppc.hpp"
  32 #include "interpreter/interpreterRuntime.hpp"
  33 #include "oops/methodCounters.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/resolvedFieldEntry.hpp"
  36 #include "oops/resolvedIndyEntry.hpp"
  37 #include "oops/resolvedMethodEntry.hpp"
  38 #include "prims/jvmtiExport.hpp"
  39 #include "prims/jvmtiThreadState.hpp"
  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/vm_version.hpp"
  44 #include "utilities/macros.hpp"
  45 #include "utilities/powerOfTwo.hpp"
  46 
  47 // Implementation of InterpreterMacroAssembler.
  48 
  49 // This file specializes the assembler with interpreter-specific macros.
  50 
  51 #ifdef PRODUCT
  52 #define BLOCK_COMMENT(str) // nothing
  53 #else
  54 #define BLOCK_COMMENT(str) block_comment(str)
  55 #endif
  56 
  57 void InterpreterMacroAssembler::null_check_throw(Register a, int offset, Register temp_reg) {
  58   address exception_entry = Interpreter::throw_NullPointerException_entry();
  59   MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
  60 }
  61 
  62 void InterpreterMacroAssembler::load_klass_check_null_throw(Register dst, Register src, Register temp_reg) {
  63   null_check_throw(src, oopDesc::klass_offset_in_bytes(), temp_reg);
  64   load_klass(dst, src);
  65 }
  66 
  67 void InterpreterMacroAssembler::jump_to_entry(address entry, Register Rscratch) {
  68   assert(entry, "Entry must have been generated by now");
  69   if (is_within_range_of_b(entry, pc())) {
  70     b(entry);
  71   } else {
  72     load_const_optimized(Rscratch, entry, R0);
  73     mtctr(Rscratch);
  74     bctr();
  75   }
  76 }
  77 
  78 void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr, bool generate_poll) {
  79   Register bytecode = R12_scratch2;
  80   if (bcp_incr != 0) {
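    // Load with update: fetch the bytecode at bcp + bcp_incr and advance R14_bcp to it.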
  81     lbzu(bytecode, bcp_incr, R14_bcp);
  82   } else {
  83     lbz(bytecode, 0, R14_bcp);
  84   }
  85 
  86   dispatch_Lbyte_code(state, bytecode, Interpreter::dispatch_table(state), generate_poll);
  87 }
  88 
  89 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  90   // Load current bytecode.
  91   Register bytecode = R12_scratch2;
  92   lbz(bytecode, 0, R14_bcp);
  93   dispatch_Lbyte_code(state, bytecode, table);
  94 }
  95 
// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in R24_dispatch_addr.
  98 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  99   Register bytecode = R12_scratch2;
 100   lbz(bytecode, bcp_incr, R14_bcp);
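  // Note: R14_bcp is not advanced here; dispatch_epilog performs the increment.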
 101 
 102   load_dispatch_table(R24_dispatch_addr, Interpreter::dispatch_table(state));
 103 
 104   sldi(bytecode, bytecode, LogBytesPerWord);
 105   ldx(R24_dispatch_addr, R24_dispatch_addr, bytecode);
 106 }
 107 
// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in R24_dispatch_addr is used for the
// dispatch.
 111 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
 112   if (bcp_incr) { addi(R14_bcp, R14_bcp, bcp_incr); }
 113   mtctr(R24_dispatch_addr);
 114   bcctr(bcondAlways, 0, bhintbhBCCTRisNotPredictable);
 115 }
 116 
 117 void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
 118   assert(scratch_reg != R0, "can't use R0 as scratch_reg here");
 119   if (JvmtiExport::can_pop_frame()) {
 120     Label L;
 121 
 122     // Check the "pending popframe condition" flag in the current thread.
 123     lwz(scratch_reg, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
 124 
 125     // Initiate popframe handling only if it is not already being
 126     // processed. If the flag has the popframe_processing bit set, it
 127     // means that this code is called *during* popframe handling - we
 128     // don't want to reenter.
 129     andi_(R0, scratch_reg, JavaThread::popframe_pending_bit);
 130     beq(CCR0, L);
 131 
 132     andi_(R0, scratch_reg, JavaThread::popframe_processing_bit);
 133     bne(CCR0, L);
 134 
 135     // Call the Interpreter::remove_activation_preserving_args_entry()
 136     // func to get the address of the same-named entrypoint in the
 137     // generated interpreter code.
 138 #if defined(ABI_ELFv2)
 139     call_c(CAST_FROM_FN_PTR(address,
 140                             Interpreter::remove_activation_preserving_args_entry),
 141            relocInfo::none);
 142 #else
 143     call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
 144                             Interpreter::remove_activation_preserving_args_entry),
 145            relocInfo::none);
 146 #endif
 147 
 148     // Jump to Interpreter::_remove_activation_preserving_args_entry.
 149     mtctr(R3_RET);
 150     bctr();
 151 
 152     align(32, 12);
 153     bind(L);
 154   }
 155 }
 156 
 157 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
 158   const Register Rthr_state_addr = scratch_reg;
 159   if (JvmtiExport::can_force_early_return()) {
 160     Label Lno_early_ret;
 161     ld(Rthr_state_addr, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
 162     cmpdi(CCR0, Rthr_state_addr, 0);
 163     beq(CCR0, Lno_early_ret);
 164 
 165     lwz(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rthr_state_addr);
 166     cmpwi(CCR0, R0, JvmtiThreadState::earlyret_pending);
 167     bne(CCR0, Lno_early_ret);
 168 
 169     // Jump to Interpreter::_earlyret_entry.
 170     lwz(R3_ARG1, in_bytes(JvmtiThreadState::earlyret_tos_offset()), Rthr_state_addr);
 171     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry));
 172     mtlr(R3_RET);
 173     blr();
 174 
 175     align(32, 12);
 176     bind(Lno_early_ret);
 177   }
 178 }
 179 
 180 void InterpreterMacroAssembler::load_earlyret_value(TosState state, Register Rscratch1) {
 181   const Register RjvmtiState = Rscratch1;
 182   const Register Rscratch2   = R0;
 183 
 184   ld(RjvmtiState, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
 185   li(Rscratch2, 0);
 186 
 187   switch (state) {
 188     case atos: ld(R17_tos, in_bytes(JvmtiThreadState::earlyret_oop_offset()), RjvmtiState);
 189                std(Rscratch2, in_bytes(JvmtiThreadState::earlyret_oop_offset()), RjvmtiState);
 190                break;
 191     case ltos: ld(R17_tos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
 192                break;
 193     case btos: // fall through
 194     case ztos: // fall through
 195     case ctos: // fall through
 196     case stos: // fall through
 197     case itos: lwz(R17_tos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
 198                break;
 199     case ftos: lfs(F15_ftos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
 200                break;
 201     case dtos: lfd(F15_ftos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
 202                break;
 203     case vtos: break;
 204     default  : ShouldNotReachHere();
 205   }
 206 
 207   // Clean up tos value in the jvmti thread state.
 208   std(Rscratch2, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
 209   // Set tos state field to illegal value.
 210   li(Rscratch2, ilgl);
 211   stw(Rscratch2, in_bytes(JvmtiThreadState::earlyret_tos_offset()), RjvmtiState);
 212 }
 213 
 214 // Common code to dispatch and dispatch_only.
 215 // Dispatch value in Lbyte_code and increment Lbcp.
 216 
 217 void InterpreterMacroAssembler::load_dispatch_table(Register dst, address* table) {
 218   address table_base = (address)Interpreter::dispatch_table((TosState)0);
 219   intptr_t table_offs = (intptr_t)table - (intptr_t)table_base;
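  // R25_templateTableBase holds dispatch_table((TosState)0), so the per-state
  // tables are normally reachable via a signed 16-bit displacement from it.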
 220   if (is_simm16(table_offs)) {
 221     addi(dst, R25_templateTableBase, (int)table_offs);
 222   } else {
 223     load_const_optimized(dst, table, R0);
 224   }
 225 }
 226 
 227 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register bytecode,
 228                                                     address* table, bool generate_poll) {
 229   assert_different_registers(bytecode, R11_scratch1);
 230 
 231   // Calc dispatch table address.
 232   load_dispatch_table(R11_scratch1, table);
 233 
 234   if (generate_poll) {
 235     address *sfpt_tbl = Interpreter::safept_table(state);
 236     if (table != sfpt_tbl) {
 237       Label dispatch;
 238       ld(R0, in_bytes(JavaThread::polling_word_offset()), R16_thread);
 239       // Armed page has poll_bit set, if poll bit is cleared just continue.
 240       andi_(R0, R0, SafepointMechanism::poll_bit());
 241       beq(CCR0, dispatch);
 242       load_dispatch_table(R11_scratch1, sfpt_tbl);
 243       align(32, 16);
 244       bind(dispatch);
 245     }
 246   }
 247 
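  // Load the dispatch target: R11_scratch1 = table[bytecode].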
 248   sldi(R12_scratch2, bytecode, LogBytesPerWord);
 249   ldx(R11_scratch1, R11_scratch1, R12_scratch2);
 250 
 251   // Jump off!
 252   mtctr(R11_scratch1);
 253   bcctr(bcondAlways, 0, bhintbhBCCTRisNotPredictable);
 254 }
 255 
 256 void InterpreterMacroAssembler::load_receiver(Register Rparam_count, Register Rrecv_dst) {
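  // Rrecv_dst = *(R15_esp + Rparam_count * Interpreter::stackElementSize), i.e. the receiver slot of the call.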
 257   sldi(Rrecv_dst, Rparam_count, Interpreter::logStackElementSize);
 258   ldx(Rrecv_dst, Rrecv_dst, R15_esp);
 259 }
 260 
 261 // helpers for expression stack
 262 
 263 void InterpreterMacroAssembler::pop_i(Register r) {
 264   lwzu(r, Interpreter::stackElementSize, R15_esp);
 265 }
 266 
 267 void InterpreterMacroAssembler::pop_ptr(Register r) {
 268   ldu(r, Interpreter::stackElementSize, R15_esp);
 269 }
 270 
 271 void InterpreterMacroAssembler::pop_l(Register r) {
 272   ld(r, Interpreter::stackElementSize, R15_esp);
 273   addi(R15_esp, R15_esp, 2 * Interpreter::stackElementSize);
 274 }
 275 
 276 void InterpreterMacroAssembler::pop_f(FloatRegister f) {
 277   lfsu(f, Interpreter::stackElementSize, R15_esp);
 278 }
 279 
 280 void InterpreterMacroAssembler::pop_d(FloatRegister f) {
 281   lfd(f, Interpreter::stackElementSize, R15_esp);
 282   addi(R15_esp, R15_esp, 2 * Interpreter::stackElementSize);
 283 }
 284 
 285 void InterpreterMacroAssembler::push_i(Register r) {
 286   stw(r, 0, R15_esp);
 287   addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
 288 }
 289 
 290 void InterpreterMacroAssembler::push_ptr(Register r) {
 291   std(r, 0, R15_esp);
 292   addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
 293 }
 294 
 295 void InterpreterMacroAssembler::push_l(Register r) {
 296   // Clear unused slot.
 297   load_const_optimized(R0, 0L);
 298   std(R0, 0, R15_esp);
 299   std(r, - Interpreter::stackElementSize, R15_esp);
 300   addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
 301 }
 302 
 303 void InterpreterMacroAssembler::push_f(FloatRegister f) {
 304   stfs(f, 0, R15_esp);
 305   addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
 306 }
 307 
 308 void InterpreterMacroAssembler::push_d(FloatRegister f)   {
 309   stfd(f, - Interpreter::stackElementSize, R15_esp);
 310   addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
 311 }
 312 
 313 void InterpreterMacroAssembler::push_2ptrs(Register first, Register second) {
 314   std(first, 0, R15_esp);
 315   std(second, -Interpreter::stackElementSize, R15_esp);
 316   addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
 317 }
 318 
 319 void InterpreterMacroAssembler::move_l_to_d(Register l, FloatRegister d) {
 320   if (VM_Version::has_mtfprd()) {
 321     mtfprd(d, l);
 322   } else {
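    // No direct GPR-to-FPR move (mtfprd) available: go through the free expression stack slot at *R15_esp.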
 323     std(l, 0, R15_esp);
 324     lfd(d, 0, R15_esp);
 325   }
 326 }
 327 
 328 void InterpreterMacroAssembler::move_d_to_l(FloatRegister d, Register l) {
 329   if (VM_Version::has_mtfprd()) {
 330     mffprd(l, d);
 331   } else {
 332     stfd(d, 0, R15_esp);
 333     ld(l, 0, R15_esp);
 334   }
 335 }
 336 
 337 void InterpreterMacroAssembler::push(TosState state) {
 338   switch (state) {
 339     case atos: push_ptr();                break;
 340     case btos:
 341     case ztos:
 342     case ctos:
 343     case stos:
 344     case itos: push_i();                  break;
 345     case ltos: push_l();                  break;
 346     case ftos: push_f();                  break;
 347     case dtos: push_d();                  break;
 348     case vtos: /* nothing to do */        break;
 349     default  : ShouldNotReachHere();
 350   }
 351 }
 352 
 353 void InterpreterMacroAssembler::pop(TosState state) {
 354   switch (state) {
 355     case atos: pop_ptr();            break;
 356     case btos:
 357     case ztos:
 358     case ctos:
 359     case stos:
 360     case itos: pop_i();              break;
 361     case ltos: pop_l();              break;
 362     case ftos: pop_f();              break;
 363     case dtos: pop_d();              break;
 364     case vtos: /* nothing to do */   break;
 365     default  : ShouldNotReachHere();
 366   }
 367   verify_oop(R17_tos, state);
 368 }
 369 
 370 void InterpreterMacroAssembler::empty_expression_stack() {
 371   addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
 372 }
 373 
 374 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int         bcp_offset,
 375                                                           Register    Rdst,
 376                                                           signedOrNot is_signed) {
 377 #if defined(VM_LITTLE_ENDIAN)
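  // Bytecode operands are stored big-endian; use a byte-reversing load on little-endian hosts.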
 378   if (bcp_offset) {
 379     load_const_optimized(Rdst, bcp_offset);
 380     lhbrx(Rdst, R14_bcp, Rdst);
 381   } else {
 382     lhbrx(Rdst, R14_bcp);
 383   }
 384   if (is_signed == Signed) {
 385     extsh(Rdst, Rdst);
 386   }
 387 #else
 388   // Read Java big endian format.
 389   if (is_signed == Signed) {
 390     lha(Rdst, bcp_offset, R14_bcp);
 391   } else {
 392     lhz(Rdst, bcp_offset, R14_bcp);
 393   }
 394 #endif
 395 }
 396 
 397 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int         bcp_offset,
 398                                                           Register    Rdst,
 399                                                           signedOrNot is_signed) {
 400 #if defined(VM_LITTLE_ENDIAN)
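  // As above: bytecode operands are big-endian, so byte-reverse the load on little-endian hosts.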
 401   if (bcp_offset) {
 402     load_const_optimized(Rdst, bcp_offset);
 403     lwbrx(Rdst, R14_bcp, Rdst);
 404   } else {
 405     lwbrx(Rdst, R14_bcp);
 406   }
 407   if (is_signed == Signed) {
 408     extsw(Rdst, Rdst);
 409   }
 410 #else
 411   // Read Java big endian format.
 412   if (bcp_offset & 3) { // Offset unaligned?
 413     load_const_optimized(Rdst, bcp_offset);
 414     if (is_signed == Signed) {
 415       lwax(Rdst, R14_bcp, Rdst);
 416     } else {
 417       lwzx(Rdst, R14_bcp, Rdst);
 418     }
 419   } else {
 420     if (is_signed == Signed) {
 421       lwa(Rdst, bcp_offset, R14_bcp);
 422     } else {
 423       lwz(Rdst, bcp_offset, R14_bcp);
 424     }
 425   }
 426 #endif
 427 }
 428 
 429 
 430 // Load the constant pool cache index from the bytecode stream.
 431 //
 432 // Kills / writes:
 433 //   - Rdst, Rscratch
 434 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset,
 435                                                        size_t index_size) {
 436   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 437   // Cache index is always in the native format, courtesy of Rewriter.
 438   if (index_size == sizeof(u2)) {
 439     lhz(Rdst, bcp_offset, R14_bcp);
 440   } else if (index_size == sizeof(u4)) {
 441     if (bcp_offset & 3) {
 442       load_const_optimized(Rdst, bcp_offset);
 443       lwax(Rdst, R14_bcp, Rdst);
 444     } else {
 445       lwa(Rdst, bcp_offset, R14_bcp);
 446     }
 447     assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
 448     nand(Rdst, Rdst, Rdst); // convert to plain index
 449   } else if (index_size == sizeof(u1)) {
 450     lbz(Rdst, bcp_offset, R14_bcp);
 451   } else {
 452     ShouldNotReachHere();
 453   }
 454   // Rdst now contains cp cache index.
 455 }
 456 
 457 // Load 4-byte signed or unsigned integer in Java format (that is, big-endian format)
 458 // from (Rsrc)+offset.
 459 void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
 460                                        signedOrNot is_signed) {
 461 #if defined(VM_LITTLE_ENDIAN)
 462   if (offset) {
 463     load_const_optimized(Rdst, offset);
 464     lwbrx(Rdst, Rdst, Rsrc);
 465   } else {
 466     lwbrx(Rdst, Rsrc);
 467   }
 468   if (is_signed == Signed) {
 469     extsw(Rdst, Rdst);
 470   }
 471 #else
 472   if (is_signed == Signed) {
 473     lwa(Rdst, offset, Rsrc);
 474   } else {
 475     lwz(Rdst, offset, Rsrc);
 476   }
 477 #endif
 478 }
 479 
 480 void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
 481   // Get index out of bytecode pointer
 482   get_cache_index_at_bcp(index, 1, sizeof(u4));
 483 
 484   // Get address of invokedynamic array
 485   ld_ptr(cache, in_bytes(ConstantPoolCache::invokedynamic_entries_offset()), R27_constPoolCache);
 486   // Scale the index to be the entry index * sizeof(ResolvedIndyEntry)
 487   sldi(index, index, log2i_exact(sizeof(ResolvedIndyEntry)));
 488   addi(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
 489   add(cache, cache, index);
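  // cache now points at invokedynamic_entries[index].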
 490 }
 491 
 492 void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
 493   // Get index out of bytecode pointer
 494   get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
 495   // Take shortcut if the size is a power of 2
 496   if (is_power_of_2(sizeof(ResolvedFieldEntry))) {
 497     // Scale index by power of 2
 498     sldi(index, index, log2i_exact(sizeof(ResolvedFieldEntry)));
 499   } else {
 500     // Scale the index to be the entry index * sizeof(ResolvedFieldEntry)
 501     mulli(index, index, sizeof(ResolvedFieldEntry));
 502   }
 503   // Get address of field entries array
 504   ld_ptr(cache, in_bytes(ConstantPoolCache::field_entries_offset()), R27_constPoolCache);
 505   addi(cache, cache, Array<ResolvedFieldEntry>::base_offset_in_bytes());
 506   add(cache, cache, index);
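  // cache now points at field_entries[index].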
 507 }
 508 
 509 void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
 510   // Get index out of bytecode pointer
 511   get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
 512   // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
 513   mulli(index, index, sizeof(ResolvedMethodEntry));
 514 
  // Get address of method entries array
  ld_ptr(cache, in_bytes(ConstantPoolCache::method_entries_offset()), R27_constPoolCache);
 517   addi(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
 518   add(cache, cache, index); // method_entries + base_offset + scaled index
 519 }
 520 
 521 // Load object from cpool->resolved_references(index).
 522 // Kills:
 523 //   - index
 524 void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index,
 525                                                                  Register tmp1, Register tmp2,
 526                                                                  Label *L_handle_null) {
 527   assert_different_registers(result, index, tmp1, tmp2);
 528   assert(index->is_nonvolatile(), "needs to survive C-call in resolve_oop_handle");
 529   get_constant_pool(result);
 530 
 531   // Convert from field index to resolved_references() index and from
 532   // word index to byte offset. Since this is a java object, it can be compressed.
 533   sldi(index, index, LogBytesPerHeapOop);
 534   // Load pointer for resolved_references[] objArray.
 535   ld(result, ConstantPool::cache_offset(), result);
 536   ld(result, ConstantPoolCache::resolved_references_offset(), result);
 537   resolve_oop_handle(result, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE);
 538 #ifdef ASSERT
 539   Label index_ok;
 540   lwa(R0, arrayOopDesc::length_offset_in_bytes(), result);
 541   sldi(R0, R0, LogBytesPerHeapOop);
 542   cmpd(CCR0, index, R0);
 543   blt(CCR0, index_ok);
 544   stop("resolved reference index out of bounds");
 545   bind(index_ok);
 546 #endif
 547   // Add in the index.
 548   add(result, index, result);
 549   load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result,
 550                 tmp1, tmp2,
 551                 MacroAssembler::PRESERVATION_NONE,
 552                 0, L_handle_null);
 553 }
 554 
 555 // load cpool->resolved_klass_at(index)
 556 void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass) {
 557   // int value = *(Rcpool->int_at_addr(which));
 558   // int resolved_klass_index = extract_low_short_from_int(value);
 559   add(Roffset, Rcpool, Roffset);
 560 #if defined(VM_LITTLE_ENDIAN)
 561   lhz(Roffset, sizeof(ConstantPool), Roffset);     // Roffset = resolved_klass_index
 562 #else
 563   lhz(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
 564 #endif
 565 
 566   ld(Rklass, ConstantPool::resolved_klasses_offset(), Rcpool); // Rklass = Rcpool->_resolved_klasses
 567 
 568   sldi(Roffset, Roffset, LogBytesPerWord);
 569   addi(Roffset, Roffset, Array<Klass*>::base_offset_in_bytes());
 570   isync(); // Order load of instance Klass wrt. tags.
 571   ldx(Rklass, Rklass, Roffset);
 572 }
 573 
 574 // Generate a subtype check: branch to ok_is_subtype if sub_klass is
 575 // a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
 576 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,
 577                                                   Register Rtmp2, Register Rtmp3, Label &ok_is_subtype) {
 578   // Profile the not-null value's klass.
 579   profile_typecheck(Rsub_klass, Rtmp1, Rtmp2);
 580   check_klass_subtype(Rsub_klass, Rsuper_klass, Rtmp1, Rtmp2, ok_is_subtype);
 581 }
 582 
 583 // Separate these two to allow for delay slot in middle.
 584 // These are used to do a test and full jump to exception-throwing code.
 585 
 586 // Check that index is in range for array, then shift index by index_shift,
 587 // and put arrayOop + shifted_index into res.
 588 // Note: res is still shy of address by array offset into object.
 589 
 590 void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Register Rindex,
 591                                                         int index_shift, Register Rtmp, Register Rres) {
 592   // Check that index is in range for array, then shift index by index_shift,
 593   // and put arrayOop + shifted_index into res.
 594   // Note: res is still shy of address by array offset into object.
 595   // Kills:
 596   //   - Rindex
 597   // Writes:
 598   //   - Rres: Address that corresponds to the array index if check was successful.
 599   verify_oop(Rarray);
 600   const Register Rlength   = R0;
 601   const Register RsxtIndex = Rtmp;
 602   Label LisNull, LnotOOR;
 603 
 604   // Array nullcheck
 605   if (!ImplicitNullChecks) {
 606     cmpdi(CCR0, Rarray, 0);
 607     beq(CCR0, LisNull);
 608   } else {
 609     null_check_throw(Rarray, arrayOopDesc::length_offset_in_bytes(), /*temp*/RsxtIndex);
 610   }
 611 
  // Rindex might contain garbage in upper bits (remember that we don't sign extend
  // during integer arithmetic operations). So kill them and put the value into the same
  // register where the ArrayIndexOutOfBounds handler expects the index.
 615   rldicl(RsxtIndex, Rindex, 0, 32); // zero extend 32 bit -> 64 bit
 616 
 617   // Index check
 618   lwz(Rlength, arrayOopDesc::length_offset_in_bytes(), Rarray);
 619   cmplw(CCR0, Rindex, Rlength);
 620   sldi(RsxtIndex, RsxtIndex, index_shift);
 621   blt(CCR0, LnotOOR);
 622   // Index should be in R17_tos, array should be in R4_ARG2.
 623   mr_if_needed(R17_tos, Rindex);
 624   mr_if_needed(R4_ARG2, Rarray);
 625   load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
 626   mtctr(Rtmp);
 627   bctr();
 628 
 629   if (!ImplicitNullChecks) {
 630     bind(LisNull);
 631     load_dispatch_table(Rtmp, (address*)Interpreter::_throw_NullPointerException_entry);
 632     mtctr(Rtmp);
 633     bctr();
 634   }
 635 
 636   align(32, 16);
 637   bind(LnotOOR);
 638 
 639   // Calc address
 640   add(Rres, RsxtIndex, Rarray);
 641 }
 642 
 643 void InterpreterMacroAssembler::index_check(Register array, Register index,
 644                                             int index_shift, Register tmp, Register res) {
 645   // pop array
 646   pop_ptr(array);
 647 
 648   // check array
 649   index_check_without_pop(array, index, index_shift, tmp, res);
 650 }
 651 
 652 void InterpreterMacroAssembler::get_const(Register Rdst) {
 653   ld(Rdst, in_bytes(Method::const_offset()), R19_method);
 654 }
 655 
 656 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
 657   get_const(Rdst);
 658   ld(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
 659 }
 660 
 661 void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
 662   get_constant_pool(Rdst);
 663   ld(Rdst, ConstantPool::cache_offset(), Rdst);
 664 }
 665 
 666 void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
 667   get_constant_pool(Rcpool);
 668   ld(Rtags, ConstantPool::tags_offset(), Rcpool);
 669 }
 670 
 671 // Unlock if synchronized method.
 672 //
 673 // Unlock the receiver if this is a synchronized method.
 674 // Unlock any Java monitors from synchronized blocks.
 675 //
 676 // If there are locked Java monitors
 677 //   If throw_monitor_exception
 678 //     throws IllegalMonitorStateException
 679 //   Else if install_monitor_exception
 680 //     installs IllegalMonitorStateException
 681 //   Else
 682 //     no error processing
 683 void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
 684                                                               bool throw_monitor_exception,
 685                                                               bool install_monitor_exception) {
 686   Label Lunlocked, Lno_unlock;
 687   {
 688     Register Rdo_not_unlock_flag = R11_scratch1;
 689     Register Raccess_flags       = R12_scratch2;
 690 
 691     // Check if synchronized method or unlocking prevented by
 692     // JavaThread::do_not_unlock_if_synchronized flag.
 693     lbz(Rdo_not_unlock_flag, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
 694     lwz(Raccess_flags, in_bytes(Method::access_flags_offset()), R19_method);
 695     li(R0, 0);
 696     stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread); // reset flag
 697 
 698     push(state);
 699 
 700     // Skip if we don't have to unlock.
 701     rldicl_(R0, Raccess_flags, 64-JVM_ACC_SYNCHRONIZED_BIT, 63); // Extract bit and compare to 0.
 702     beq(CCR0, Lunlocked);
 703 
 704     cmpwi(CCR0, Rdo_not_unlock_flag, 0);
 705     bne(CCR0, Lno_unlock);
 706   }
 707 
 708   // Unlock
 709   {
 710     Register Rmonitor_base = R11_scratch1;
 711 
 712     Label Lunlock;
 713     // If it's still locked, everything is ok, unlock it.
 714     ld(Rmonitor_base, 0, R1_SP);
 715     addi(Rmonitor_base, Rmonitor_base,
 716          -(frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
 717 
 718     ld(R0, BasicObjectLock::obj_offset(), Rmonitor_base);
 719     cmpdi(CCR0, R0, 0);
 720     bne(CCR0, Lunlock);
 721 
 722     // If it's already unlocked, throw exception.
 723     if (throw_monitor_exception) {
 724       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
 725       should_not_reach_here();
 726     } else {
 727       if (install_monitor_exception) {
 728         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
 729         b(Lunlocked);
 730       }
 731     }
 732 
 733     bind(Lunlock);
 734     unlock_object(Rmonitor_base);
 735   }
 736 
  // Check that all other monitors are unlocked. Throw IllegalMonitorStateException if not.
 738   bind(Lunlocked);
 739   {
 740     Label Lexception, Lrestart;
 741     Register Rcurrent_obj_addr = R11_scratch1;
 742     const int delta = frame::interpreter_frame_monitor_size_in_bytes();
 743     assert((delta & LongAlignmentMask) == 0, "sizeof BasicObjectLock must be even number of doublewords");
 744 
 745     bind(Lrestart);
 746     // Set up search loop: Calc num of iterations.
 747     {
 748       Register Riterations = R12_scratch2;
 749       Register Rmonitor_base = Rcurrent_obj_addr;
 750       ld(Rmonitor_base, 0, R1_SP);
 751       addi(Rmonitor_base, Rmonitor_base, - frame::ijava_state_size);  // Monitor base
 752 
 753       subf_(Riterations, R26_monitor, Rmonitor_base);
 754       ble(CCR0, Lno_unlock);
 755 
 756       addi(Rcurrent_obj_addr, Rmonitor_base,
 757            in_bytes(BasicObjectLock::obj_offset()) - frame::interpreter_frame_monitor_size_in_bytes());
 758       // Check if any monitor is on stack, bail out if not
 759       srdi(Riterations, Riterations, exact_log2(delta));
 760       mtctr(Riterations);
 761     }
 762 
 763     // The search loop: Look for locked monitors.
 764     {
 765       const Register Rcurrent_obj = R0;
 766       Label Lloop;
 767 
 768       ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
 769       addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
 770       bind(Lloop);
 771 
 772       // Check if current entry is used.
 773       cmpdi(CCR0, Rcurrent_obj, 0);
 774       bne(CCR0, Lexception);
 775       // Preload next iteration's compare value.
 776       ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
 777       addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
 778       bdnz(Lloop);
 779     }
 780     // Fell through: Everything's unlocked => finish.
 781     b(Lno_unlock);
 782 
 783     // An object is still locked => need to throw exception.
 784     bind(Lexception);
 785     if (throw_monitor_exception) {
 786       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
 787       should_not_reach_here();
 788     } else {
 789       // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
 790       // Unlock does not block, so don't have to worry about the frame.
 791       Register Rmonitor_addr = R11_scratch1;
 792       addi(Rmonitor_addr, Rcurrent_obj_addr, -in_bytes(BasicObjectLock::obj_offset()) + delta);
 793       unlock_object(Rmonitor_addr);
 794       if (install_monitor_exception) {
 795         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
 796       }
 797       b(Lrestart);
 798     }
 799   }
 800 
 801   align(32, 12);
 802   bind(Lno_unlock);
 803   pop(state);
 804 }
 805 
 806 // Support function for remove_activation & Co.
 807 void InterpreterMacroAssembler::merge_frames(Register Rsender_sp, Register return_pc,
 808                                              Register Rscratch1, Register Rscratch2) {
 809   // Pop interpreter frame.
 810   ld(Rscratch1, 0, R1_SP); // *SP
 811   ld(Rsender_sp, _ijava_state_neg(sender_sp), Rscratch1); // top_frame_sp
 812   ld(Rscratch2, 0, Rscratch1); // **SP
 813   if (return_pc!=noreg) {
 814     ld(return_pc, _abi0(lr), Rscratch1); // LR
 815   }
 816 
 817   // Merge top frames.
 818   subf(Rscratch1, R1_SP, Rsender_sp); // top_frame_sp - SP
 819   stdux(Rscratch2, R1_SP, Rscratch1); // atomically set *(SP = top_frame_sp) = **SP
 820 }
 821 
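// Narrow the int result in 'result' according to the method's declared return
// type (T_BOOLEAN, T_BYTE, T_CHAR, T_SHORT); T_INT needs no conversion.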
 822 void InterpreterMacroAssembler::narrow(Register result) {
 823   Register ret_type = R11_scratch1;
 824   ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
 825   lbz(ret_type, in_bytes(ConstMethod::result_type_offset()), R11_scratch1);
 826 
 827   Label notBool, notByte, notChar, done;
 828 
 829   // common case first
 830   cmpwi(CCR0, ret_type, T_INT);
 831   beq(CCR0, done);
 832 
 833   cmpwi(CCR0, ret_type, T_BOOLEAN);
 834   bne(CCR0, notBool);
 835   andi(result, result, 0x1);
 836   b(done);
 837 
 838   bind(notBool);
 839   cmpwi(CCR0, ret_type, T_BYTE);
 840   bne(CCR0, notByte);
 841   extsb(result, result);
 842   b(done);
 843 
 844   bind(notByte);
 845   cmpwi(CCR0, ret_type, T_CHAR);
 846   bne(CCR0, notChar);
 847   andi(result, result, 0xffff);
 848   b(done);
 849 
 850   bind(notChar);
 851   // cmpwi(CCR0, ret_type, T_SHORT);  // all that's left
 852   // bne(CCR0, done);
 853   extsh(result, result);
 854 
 855   // Nothing to do for T_INT
 856   bind(done);
 857 }
 858 
 859 // Remove activation.
 860 //
 861 // Apply stack watermark barrier.
 862 // Unlock the receiver if this is a synchronized method.
 863 // Unlock any Java monitors from synchronized blocks.
 864 // Remove the activation from the stack.
 865 //
 866 // If there are locked Java monitors
 867 //    If throw_monitor_exception
 868 //       throws IllegalMonitorStateException
 869 //    Else if install_monitor_exception
 870 //       installs IllegalMonitorStateException
 871 //    Else
 872 //       no error processing
 873 void InterpreterMacroAssembler::remove_activation(TosState state,
 874                                                   bool throw_monitor_exception,
 875                                                   bool install_monitor_exception) {
 876   BLOCK_COMMENT("remove_activation {");
 877 
  // The poll below is for the stack watermark barrier. It allows frames that would
  // normally not be safe to use to be fixed up lazily. Such bad returns into unsafe
  // territory of the stack will call InterpreterRuntime::at_unwind.
 881   Label slow_path;
 882   Label fast_path;
 883   safepoint_poll(slow_path, R11_scratch1, true /* at_return */, false /* in_nmethod */);
 884   b(fast_path);
 885   bind(slow_path);
 886   push(state);
 887   set_last_Java_frame(R1_SP, noreg);
 888   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), R16_thread);
 889   reset_last_Java_frame();
 890   pop(state);
 891   align(32);
 892   bind(fast_path);
 893 
 894   unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
 895 
 896   // Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
 897   notify_method_exit(false, state, NotifyJVMTI, true);
 898 
 899   BLOCK_COMMENT("reserved_stack_check:");
 900   if (StackReservedPages > 0) {
 901     // Test if reserved zone needs to be enabled.
 902     Label no_reserved_zone_enabling;
 903 
 904     // check if already enabled - if so no re-enabling needed
 905     assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
 906     lwz(R0, in_bytes(JavaThread::stack_guard_state_offset()), R16_thread);
 907     cmpwi(CCR0, R0, StackOverflow::stack_guard_enabled);
 908     beq_predict_taken(CCR0, no_reserved_zone_enabling);
 909 
 910     // Compare frame pointers. There is no good stack pointer, as with stack
 911     // frame compression we can get different SPs when we do calls. A subsequent
 912     // call could have a smaller SP, so that this compare succeeds for an
 913     // inner call of the method annotated with ReservedStack.
 914     ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
 915     ld_ptr(R11_scratch1, _abi0(callers_sp), R1_SP); // Load frame pointer.
 916     cmpld(CCR0, R11_scratch1, R0);
 917     blt_predict_taken(CCR0, no_reserved_zone_enabling);
 918 
 919     // Enable reserved zone again, throw stack overflow exception.
 920     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
 921     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError));
 922 
 923     should_not_reach_here();
 924 
 925     bind(no_reserved_zone_enabling);
 926   }
 927 
 928   verify_oop(R17_tos, state);
 929 
 930   merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
 931   mtlr(R0);
 932   pop_cont_fastpath();
 933   BLOCK_COMMENT("} remove_activation");
 934 }
 935 
 936 // Lock object
 937 //
 938 // Registers alive
 939 //   monitor - Address of the BasicObjectLock to be used for locking,
 940 //             which must be initialized with the object to lock.
 941 //   object  - Address of the object to be locked.
 942 //
 943 void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
 944   if (LockingMode == LM_MONITOR) {
 945     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
 946   } else {
 947     // template code (for LM_LEGACY):
 948     //
 949     // markWord displaced_header = obj->mark().set_unlocked();
 950     // monitor->lock()->set_displaced_header(displaced_header);
 951     // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
 952     //   // We stored the monitor address into the object's mark word.
 953     // } else if (THREAD->is_lock_owned((address)displaced_header))
 954     //   // Simple recursive case.
 955     //   monitor->lock()->set_displaced_header(nullptr);
 956     // } else {
 957     //   // Slow path.
 958     //   InterpreterRuntime::monitorenter(THREAD, monitor);
 959     // }
 960 
 961     const Register header           = R7_ARG5;
 962     const Register object_mark_addr = R8_ARG6;
 963     const Register current_header   = R9_ARG7;
 964     const Register tmp              = R10_ARG8;
 965 
 966     Label count_locking, done;
 967     Label cas_failed, slow_case;
 968 
 969     assert_different_registers(header, object_mark_addr, current_header, tmp);
 970 
 971     // markWord displaced_header = obj->mark().set_unlocked();
 972 
 973     // Load markWord from object into header.
 974     ld(header, oopDesc::mark_offset_in_bytes(), object);
 975 
 976     if (DiagnoseSyncOnValueBasedClasses != 0) {
 977       load_klass(tmp, object);
 978       lwz(tmp, in_bytes(Klass::access_flags_offset()), tmp);
 979       testbitdi(CCR0, R0, tmp, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
 980       bne(CCR0, slow_case);
 981     }
 982 
 983     if (LockingMode == LM_LIGHTWEIGHT) {
 984       lightweight_lock(object, /* mark word */ header, tmp, slow_case);
 985       b(count_locking);
 986     } else if (LockingMode == LM_LEGACY) {
 987 
 988       // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
 989       ori(header, header, markWord::unlocked_value);
 990 
 991       // monitor->lock()->set_displaced_header(displaced_header);
 992       const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
 993       const int mark_offset = lock_offset +
 994                               BasicLock::displaced_header_offset_in_bytes();
 995 
 996       // Initialize the box (Must happen before we update the object mark!).
 997       std(header, mark_offset, monitor);
 998 
 999       // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
1000 
1001       // Store stack address of the BasicObjectLock (this is monitor) into object.
1002       addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
1003 
1004       // Must fence, otherwise, preceding store(s) may float below cmpxchg.
1005       // CmpxchgX sets CCR0 to cmpX(current, displaced).
1006       cmpxchgd(/*flag=*/CCR0,
1007                /*current_value=*/current_header,
1008                /*compare_value=*/header, /*exchange_value=*/monitor,
1009                /*where=*/object_mark_addr,
1010                MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
1011                MacroAssembler::cmpxchgx_hint_acquire_lock(),
1012                noreg,
1013                &cas_failed,
1014                /*check without membar and ldarx first*/true);
1015 
1016       // If the compare-and-exchange succeeded, then we found an unlocked
1017       // object and we have now locked it.
1018       b(count_locking);
1019       bind(cas_failed);
1020 
1021       // } else if (THREAD->is_lock_owned((address)displaced_header))
1022       //   // Simple recursive case.
1023       //   monitor->lock()->set_displaced_header(nullptr);
1024 
1025       // We did not see an unlocked object so try the fast recursive case.
1026 
1027       // Check if owner is self by comparing the value in the markWord of object
1028       // (current_header) with the stack pointer.
1029       sub(current_header, current_header, R1_SP);
1030 
1031       assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
1032       load_const_optimized(tmp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
1033 
1034       and_(R0/*==0?*/, current_header, tmp);
1035       // If condition is true we are done and hence we can store 0 in the displaced
1036       // header indicating it is a recursive lock.
1037       bne(CCR0, slow_case);
1038       std(R0/*==0!*/, mark_offset, monitor);
1039       b(count_locking);
1040     }
1041 
1042     // } else {
1043     //   // Slow path.
1044     //   InterpreterRuntime::monitorenter(THREAD, monitor);
1045 
1046     // None of the above fast optimizations worked so we have to get into the
1047     // slow case of monitor enter.
1048     bind(slow_case);
1049     if (LockingMode == LM_LIGHTWEIGHT) {
1050       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj), object);
1051     } else {
1052       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
1053     }
1054     b(done);
1055     // }
1056     align(32, 12);
1057     bind(count_locking);
1058     inc_held_monitor_count(current_header /*tmp*/);
1059     bind(done);
1060   }
1061 }
1062 
1063 // Unlocks an object. Used in monitorexit bytecode and remove_activation.
1064 //
1065 // Registers alive
1066 //   monitor - Address of the BasicObjectLock to be used for locking,
1067 //             which must be initialized with the object to lock.
1068 //
// Throw IllegalMonitorStateException if object is not locked by current thread.
1070 void InterpreterMacroAssembler::unlock_object(Register monitor) {
1071   if (LockingMode == LM_MONITOR) {
1072     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
1073   } else {
1074 
1075     // template code (for LM_LEGACY):
1076     //
1077     // if ((displaced_header = monitor->displaced_header()) == nullptr) {
1078     //   // Recursive unlock. Mark the monitor unlocked by setting the object field to null.
1079     //   monitor->set_obj(nullptr);
1080     // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
1081     //   // We swapped the unlocked mark in displaced_header into the object's mark word.
1082     //   monitor->set_obj(nullptr);
1083     // } else {
1084     //   // Slow path.
1085     //   InterpreterRuntime::monitorexit(monitor);
1086     // }
1087 
1088     const Register object           = R7_ARG5;
1089     const Register header           = R8_ARG6;
1090     const Register object_mark_addr = R9_ARG7;
1091     const Register current_header   = R10_ARG8;
1092 
1093     Label free_slot;
1094     Label slow_case;
1095 
1096     assert_different_registers(object, header, object_mark_addr, current_header);
1097 
1098     if (LockingMode != LM_LIGHTWEIGHT) {
1099       // Test first if we are in the fast recursive case.
1100       ld(header, in_bytes(BasicObjectLock::lock_offset()) +
1101                  BasicLock::displaced_header_offset_in_bytes(), monitor);
1102 
1103       // If the displaced header is zero, we have a recursive unlock.
1104       cmpdi(CCR0, header, 0);
1105       beq(CCR0, free_slot); // recursive unlock
1106     }
1107 
1108     // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
1109     //   // We swapped the unlocked mark in displaced_header into the object's mark word.
1110     //   monitor->set_obj(nullptr);
1111 
1112     // If we still have a lightweight lock, unlock the object and be done.
1113 
1114     // The object address from the monitor is in object.
1115     ld(object, in_bytes(BasicObjectLock::obj_offset()), monitor);
1116 
1117     if (LockingMode == LM_LIGHTWEIGHT) {
1118       // Check for non-symmetric locking. This is allowed by the spec and the interpreter
1119       // must handle it.
1120       Register tmp = current_header;
1121       // First check for lock-stack underflow.
1122       lwz(tmp, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
1123       cmplwi(CCR0, tmp, (unsigned)LockStack::start_offset());
1124       ble(CCR0, slow_case);
1125       // Then check if the top of the lock-stack matches the unlocked object.
1126       addi(tmp, tmp, -oopSize);
1127       ldx(tmp, tmp, R16_thread);
1128       cmpd(CCR0, tmp, object);
1129       bne(CCR0, slow_case);
1130 
1131       ld(header, oopDesc::mark_offset_in_bytes(), object);
1132       andi_(R0, header, markWord::monitor_value);
1133       bne(CCR0, slow_case);
1134       lightweight_unlock(object, header, slow_case);
1135     } else {
1136       addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
1137 
1138       // We have the displaced header in displaced_header. If the lock is still
1139       // lightweight, it will contain the monitor address and we'll store the
1140       // displaced header back into the object's mark word.
1141       // CmpxchgX sets CCR0 to cmpX(current, monitor).
1142       cmpxchgd(/*flag=*/CCR0,
1143                /*current_value=*/current_header,
1144                /*compare_value=*/monitor, /*exchange_value=*/header,
1145                /*where=*/object_mark_addr,
1146                MacroAssembler::MemBarRel,
1147                MacroAssembler::cmpxchgx_hint_release_lock(),
1148                noreg,
1149                &slow_case);
1150     }
1151     b(free_slot);
1152 
1153     // } else {
1154     //   // Slow path.
1155     //   InterpreterRuntime::monitorexit(monitor);
1156 
1157     // The lock has been converted into a heavy lock and hence
1158     // we need to get into the slow case.
1159     bind(slow_case);
1160     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
1161     // }
1162 
1163     Label done;
1164     b(done); // Monitor register may be overwritten! Runtime has already freed the slot.
1165 
1166     // Exchange worked, do monitor->set_obj(nullptr);
1167     align(32, 12);
1168     bind(free_slot);
1169     li(R0, 0);
1170     std(R0, in_bytes(BasicObjectLock::obj_offset()), monitor);
1171     dec_held_monitor_count(current_header /*tmp*/);
1172     bind(done);
1173   }
1174 }
1175 
1176 // Load compiled (i2c) or interpreter entry when calling from interpreted and
1177 // do the call. Centralized so that all interpreter calls will do the same actions.
1178 // If jvmti single stepping is on for a thread we must not call compiled code.
1179 //
1180 // Input:
1181 //   - Rtarget_method: method to call
1182 //   - Rret_addr:      return address
1183 //   - 2 scratch regs
1184 //
1185 void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, Register Rret_addr,
1186                                                       Register Rscratch1, Register Rscratch2) {
1187   assert_different_registers(Rscratch1, Rscratch2, Rtarget_method, Rret_addr);
1188   // Assume we want to go compiled if available.
1189   const Register Rtarget_addr = Rscratch1;
1190   const Register Rinterp_only = Rscratch2;
1191 
1192   ld(Rtarget_addr, in_bytes(Method::from_interpreted_offset()), Rtarget_method);
1193 
1194   if (JvmtiExport::can_post_interpreter_events()) {
1195     lwz(Rinterp_only, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
1196 
1197     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
1198     // compiled code in threads for which the event is enabled. Check here for
1199     // interp_only_mode if these events CAN be enabled.
1200     Label done;
1201     cmpwi(CCR0, Rinterp_only, 0);
1202     beq(CCR0, done);
1203     ld(Rtarget_addr, in_bytes(Method::interpreter_entry_offset()), Rtarget_method);
1204     align(32, 12);
1205     bind(done);
1206   }
1207 
1208 #ifdef ASSERT
1209   {
1210     Label Lok;
1211     cmpdi(CCR0, Rtarget_addr, 0);
1212     bne(CCR0, Lok);
1213     stop("null entry point");
1214     bind(Lok);
1215   }
1216 #endif // ASSERT
1217 
1218   mr(R21_sender_SP, R1_SP);
1219 
1220   // Calc a precise SP for the call. The SP value we calculated in
1221   // generate_fixed_frame() is based on the max_stack() value, so we would waste stack space
1222   // if esp is not max. Also, the i2c adapter extends the stack space without restoring
1223   // our pre-calced value, so repeating calls via i2c would result in stack overflow.
1224   // Since esp already points to an empty slot, we just have to sub 1 additional slot
1225   // to meet the abi scratch requirements.
1226   // The max_stack pointer will get restored by means of the GR_Lmax_stack local in
1227   // the return entry of the interpreter.
1228   addi(Rscratch2, R15_esp, Interpreter::stackElementSize - frame::top_ijava_frame_abi_size);
1229   clrrdi(Rscratch2, Rscratch2, exact_log2(frame::alignment_in_bytes)); // round towards smaller address
1230   resize_frame_absolute(Rscratch2, Rscratch2, R0);
1231 
1232   mr_if_needed(R19_method, Rtarget_method);
1233   mtctr(Rtarget_addr);
1234   mtlr(Rret_addr);
1235 
1236   save_interpreter_state(Rscratch2);
1237 #ifdef ASSERT
1238   ld(Rscratch1, _ijava_state_neg(top_frame_sp), Rscratch2); // Rscratch2 contains fp
1239   sldi(Rscratch1, Rscratch1, Interpreter::logStackElementSize);
1240   add(Rscratch1, Rscratch1, Rscratch2); // Rscratch2 contains fp
1241   // Compare sender_sp with the derelativized top_frame_sp
1242   cmpd(CCR0, R21_sender_SP, Rscratch1);
1243   asm_assert_eq("top_frame_sp incorrect");
1244 #endif
1245 
1246   bctr();
1247 }
1248 
1249 // Set the method data pointer for the current bcp.
1250 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1251   assert(ProfileInterpreter, "must be profiling interpreter");
1252   Label get_continue;
1253   ld(R28_mdx, in_bytes(Method::method_data_offset()), R19_method);
1254   test_method_data_pointer(get_continue);
1255   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), R19_method, R14_bcp);
1256 
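  // R3_RET holds the data index (di) returned by InterpreterRuntime::bcp_to_di.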
1257   addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
1258   add(R28_mdx, R28_mdx, R3_RET);
1259   bind(get_continue);
1260 }
1261 
// Test the method data pointer (R28_mdx). If it is null, continue at the specified label.
1263 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
1264   assert(ProfileInterpreter, "must be profiling interpreter");
1265   cmpdi(CCR0, R28_mdx, 0);
1266   beq(CCR0, zero_continue);
1267 }
1268 
1269 void InterpreterMacroAssembler::verify_method_data_pointer() {
1270   assert(ProfileInterpreter, "must be profiling interpreter");
1271 #ifdef ASSERT
1272   Label verify_continue;
1273   test_method_data_pointer(verify_continue);
1274 
1275   // If the mdp is valid, it will point to a DataLayout header which is
1276   // consistent with the bcp. The converse is highly probable also.
1277   lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
1278   ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
1279   addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
  add(R11_scratch1, R11_scratch1, R12_scratch2);
1281   cmpd(CCR0, R11_scratch1, R14_bcp);
1282   beq(CCR0, verify_continue);
1283 
1284   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
1285 
1286   bind(verify_continue);
1287 #endif
1288 }
1289 
1290 // Store a value at some constant offset from the method data pointer.
1291 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
1292   assert(ProfileInterpreter, "must be profiling interpreter");
1293 
1294   std(value, constant, R28_mdx);
1295 }
1296 
1297 // Increment the value at some constant offset from the method data pointer.
1298 void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
1299                                                       Register counter_addr,
1300                                                       Register Rbumped_count,
1301                                                       bool decrement) {
1302   // Locate the counter at a fixed offset from the mdp:
1303   addi(counter_addr, R28_mdx, constant);
1304   increment_mdp_data_at(counter_addr, Rbumped_count, decrement);
1305 }
1306 
1307 // Increment the value at some non-fixed (reg + constant) offset from
1308 // the method data pointer.
1309 void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
1310                                                       int constant,
1311                                                       Register scratch,
1312                                                       Register Rbumped_count,
1313                                                       bool decrement) {
1314   // Add the constant to reg to get the offset.
1315   add(scratch, R28_mdx, reg);
1316   // Then calculate the counter address.
1317   addi(scratch, scratch, constant);
1318   increment_mdp_data_at(scratch, Rbumped_count, decrement);
1319 }
1320 
1321 void InterpreterMacroAssembler::increment_mdp_data_at(Register counter_addr,
1322                                                       Register Rbumped_count,
1323                                                       bool decrement) {
1324   assert(ProfileInterpreter, "must be profiling interpreter");
1325 
1326   // Load the counter.
1327   ld(Rbumped_count, 0, counter_addr);
1328 
  if (decrement) {
    // Decrement the counter.
    addi(Rbumped_count, Rbumped_count, - DataLayout::counter_increment);
    // Store the decremented counter.
    std(Rbumped_count, 0, counter_addr);
    // Note: add/sub overflow checks are not ported, since 64 bit
    // calculation should never overflow.
  } else {
    // Increment the counter.
    addi(Rbumped_count, Rbumped_count, DataLayout::counter_increment);
    // Store the incremented counter.
    std(Rbumped_count, 0, counter_addr);
  }
1342 }
1343 
1344 // Set a flag value at the current method data pointer position.
1345 void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
1346                                                 Register scratch) {
1347   assert(ProfileInterpreter, "must be profiling interpreter");
1348   // Load the data header.
1349   lbz(scratch, in_bytes(DataLayout::flags_offset()), R28_mdx);
1350   // Set the flag.
1351   ori(scratch, scratch, flag_constant);
1352   // Store the modified header.
1353   stb(scratch, in_bytes(DataLayout::flags_offset()), R28_mdx);
1354 }
1355 
1356 // Test the location at some offset from the method data pointer.
1357 // If it is not equal to value, branch to the not_equal_continue Label.
1358 void InterpreterMacroAssembler::test_mdp_data_at(int offset,
1359                                                  Register value,
1360                                                  Label& not_equal_continue,
1361                                                  Register test_out) {
1362   assert(ProfileInterpreter, "must be profiling interpreter");
1363 
1364   ld(test_out, offset, R28_mdx);
1365   cmpd(CCR0, value, test_out);
1366   bne(CCR0, not_equal_continue);
1367 }
1368 
1369 // Update the method data pointer by the displacement located at some fixed
1370 // offset from the method data pointer.
1371 void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
1372                                                      Register scratch) {
1373   assert(ProfileInterpreter, "must be profiling interpreter");
1374 
1375   ld(scratch, offset_of_disp, R28_mdx);
1376   add(R28_mdx, scratch, R28_mdx);
1377 }
1378 
1379 // Update the method data pointer by the displacement located at the
1380 // offset (reg + offset_of_disp).
1381 void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
1382                                                      int offset_of_disp,
1383                                                      Register scratch) {
1384   assert(ProfileInterpreter, "must be profiling interpreter");
1385 
1386   add(scratch, reg, R28_mdx);
1387   ld(scratch, offset_of_disp, scratch);
1388   add(R28_mdx, scratch, R28_mdx);
1389 }
1390 
1391 // Update the method data pointer by a simple constant displacement.
1392 void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
1393   assert(ProfileInterpreter, "must be profiling interpreter");
1394   addi(R28_mdx, R28_mdx, constant);
1395 }
1396 
1397 // Update the method data pointer for a _ret bytecode whose target
1398 // was not among our cached targets.
1399 void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
1400                                                    Register return_bci) {
1401   assert(ProfileInterpreter, "must be profiling interpreter");
1402 
1403   push(state);
1404   assert(return_bci->is_nonvolatile(), "need to protect return_bci");
1405   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
1406   pop(state);
1407 }
1408 
1409 // Increments the backedge counter.
1410 // Returns backedge counter + invocation counter in Rdst.
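// Roughly, a sketch of the effect (assuming the usual 32-bit counter cells):
//   backedge  += InvocationCounter::count_increment;   // stored back to *Rcounters
//   Rdst       = backedge + (invocation & InvocationCounter::count_mask_value);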
1411 void InterpreterMacroAssembler::increment_backedge_counter(const Register Rcounters, const Register Rdst,
1412                                                            const Register Rtmp1, Register Rscratch) {
1413   assert(UseCompiler, "incrementing must be useful");
1414   assert_different_registers(Rdst, Rtmp1);
1415   const Register invocation_counter = Rtmp1;
1416   const Register counter = Rdst;
1417   // TODO: PPC port: assert(4 == InvocationCounter::sz_counter(), "unexpected field size.");
1418 
1419   // Load backedge counter.
1420   lwz(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
1421                in_bytes(InvocationCounter::counter_offset()), Rcounters);
1422   // Load invocation counter.
1423   lwz(invocation_counter, in_bytes(MethodCounters::invocation_counter_offset()) +
1424                           in_bytes(InvocationCounter::counter_offset()), Rcounters);
1425 
1426   // Add the delta to the backedge counter.
1427   addi(counter, counter, InvocationCounter::count_increment);
1428 
1429   // Mask the invocation counter.
1430   andi(invocation_counter, invocation_counter, InvocationCounter::count_mask_value);
1431 
1432   // Store new counter value.
1433   stw(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
1434                in_bytes(InvocationCounter::counter_offset()), Rcounters);
1435   // Return invocation counter + backedge counter.
1436   add(counter, counter, invocation_counter);
1437 }
1438 
1439 // Count a taken branch in the bytecodes.
1440 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
1441   if (ProfileInterpreter) {
1442     Label profile_continue;
1443 
1444     // If no method data exists, go to profile_continue.
1445     test_method_data_pointer(profile_continue);
1446 
1447     // We are taking a branch. Increment the taken count.
1448     increment_mdp_data_at(in_bytes(JumpData::taken_offset()), scratch, bumped_count);
1449 
1450     // The method data pointer needs to be updated to reflect the new target.
1451     update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
1452     bind (profile_continue);
1453   }
1454 }
1455 
1456 // Count a not-taken branch in the bytecodes.
1457 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch1, Register scratch2) {
1458   if (ProfileInterpreter) {
1459     Label profile_continue;
1460 
1461     // If no method data exists, go to profile_continue.
1462     test_method_data_pointer(profile_continue);
1463 
1464     // We are not taking the branch. Increment the not-taken count.
1465     increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch1, scratch2);
1466 
1467     // The method data pointer needs to be updated to correspond to the
1468     // next bytecode.
1469     update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
1470     bind (profile_continue);
1471   }
1472 }
1473 
1474 // Count a non-virtual call in the bytecodes.
1475 void InterpreterMacroAssembler::profile_call(Register scratch1, Register scratch2) {
1476   if (ProfileInterpreter) {
1477     Label profile_continue;
1478 
1479     // If no method data exists, go to profile_continue.
1480     test_method_data_pointer(profile_continue);
1481 
1482     // We are making a call. Increment the count.
1483     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1484 
1485     // The method data pointer needs to be updated to reflect the new target.
1486     update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
1487     bind (profile_continue);
1488   }
1489 }
1490 
1491 // Count a final call in the bytecodes.
1492 void InterpreterMacroAssembler::profile_final_call(Register scratch1, Register scratch2) {
1493   if (ProfileInterpreter) {
1494     Label profile_continue;
1495 
1496     // If no method data exists, go to profile_continue.
1497     test_method_data_pointer(profile_continue);
1498 
1499     // We are making a call. Increment the count.
1500     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1501 
1502     // The method data pointer needs to be updated to reflect the new target.
1503     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1504     bind (profile_continue);
1505   }
1506 }
1507 
1508 // Count a virtual call in the bytecodes.
1509 void InterpreterMacroAssembler::profile_virtual_call(Register Rreceiver,
1510                                                      Register Rscratch1,
1511                                                      Register Rscratch2,
1512                                                      bool receiver_can_be_null) {
1513   if (!ProfileInterpreter) { return; }
1514   Label profile_continue;
1515 
1516   // If no method data exists, go to profile_continue.
1517   test_method_data_pointer(profile_continue);
1518 
1519   Label skip_receiver_profile;
1520   if (receiver_can_be_null) {
1521     Label not_null;
1522     cmpdi(CCR0, Rreceiver, 0);
1523     bne(CCR0, not_null);
1524     // We are making a call. Increment the count for null receiver.
1525     increment_mdp_data_at(in_bytes(CounterData::count_offset()), Rscratch1, Rscratch2);
1526     b(skip_receiver_profile);
1527     bind(not_null);
1528   }
1529 
1530   // Record the receiver type.
1531   record_klass_in_profile(Rreceiver, Rscratch1, Rscratch2);
1532   bind(skip_receiver_profile);
1533 
1534   // The method data pointer needs to be updated to reflect the new target.
1535   update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1536   bind (profile_continue);
1537 }
1538 
1539 void InterpreterMacroAssembler::profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2) {
1540   if (ProfileInterpreter) {
1541     Label profile_continue;
1542 
1543     // If no method data exists, go to profile_continue.
1544     test_method_data_pointer(profile_continue);
1545 
1546     int mdp_delta = in_bytes(BitData::bit_data_size());
1547     if (TypeProfileCasts) {
1548       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1549 
1550       // Record the object type.
1551       record_klass_in_profile(Rklass, Rscratch1, Rscratch2);
1552     }
1553 
1554     // The method data pointer needs to be updated.
1555     update_mdp_by_constant(mdp_delta);
1556 
1557     bind (profile_continue);
1558   }
1559 }
1560 
1561 // Count a ret in the bytecodes.
1562 void InterpreterMacroAssembler::profile_ret(TosState state, Register return_bci,
1563                                             Register scratch1, Register scratch2) {
1564   if (ProfileInterpreter) {
1565     Label profile_continue;
1566     uint row;
1567 
1568     // If no method data exists, go to profile_continue.
1569     test_method_data_pointer(profile_continue);
1570 
1571     // Update the total ret count.
1572     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1573 
1574     for (row = 0; row < RetData::row_limit(); row++) {
1575       Label next_test;
1576 
1577       // See if return_bci is equal to bci[n]:
1578       test_mdp_data_at(in_bytes(RetData::bci_offset(row)), return_bci, next_test, scratch1);
1579 
1580       // return_bci is equal to bci[n]. Increment the count.
1581       increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch1, scratch2);
1582 
1583       // The method data pointer needs to be updated to reflect the new target.
1584       update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch1);
1585       b(profile_continue);
1586       bind(next_test);
1587     }
1588 
1589     update_mdp_for_ret(state, return_bci);
1590 
1591     bind (profile_continue);
1592   }
1593 }
1594 
1595 // Count the default case of a switch construct.
1596 void InterpreterMacroAssembler::profile_switch_default(Register scratch1,  Register scratch2) {
1597   if (ProfileInterpreter) {
1598     Label profile_continue;
1599 
1600     // If no method data exists, go to profile_continue.
1601     test_method_data_pointer(profile_continue);
1602 
1603     // Update the default case count
1604     increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
1605                           scratch1, scratch2);
1606 
1607     // The method data pointer needs to be updated.
1608     update_mdp_by_offset(in_bytes(MultiBranchData::default_displacement_offset()),
1609                          scratch1);
1610 
1611     bind (profile_continue);
1612   }
1613 }
1614 
1615 // Count the index'th case of a switch construct.
1616 void InterpreterMacroAssembler::profile_switch_case(Register index,
1617                                                     Register scratch1,
1618                                                     Register scratch2,
1619                                                     Register scratch3) {
1620   if (ProfileInterpreter) {
1621     assert_different_registers(index, scratch1, scratch2, scratch3);
1622     Label profile_continue;
1623 
1624     // If no method data exists, go to profile_continue.
1625     test_method_data_pointer(profile_continue);
1626 
1627     // Build the offset: case_array_offset() + index * per_case_size().
1628     li(scratch3, in_bytes(MultiBranchData::case_array_offset()));
1629 
1630     assert(in_bytes(MultiBranchData::per_case_size()) == 16, "expected power-of-2 per-case size for the sldi below");
1631     sldi(scratch1, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));
1632     add(scratch1, scratch1, scratch3);
1633 
1634     // Update the case count.
1635     increment_mdp_data_at(scratch1, in_bytes(MultiBranchData::relative_count_offset()), scratch2, scratch3);
1636 
1637     // The method data pointer needs to be updated.
1638     update_mdp_by_offset(scratch1, in_bytes(MultiBranchData::relative_displacement_offset()), scratch2);
1639 
1640     bind (profile_continue);
1641   }
1642 }
1643 
1644 void InterpreterMacroAssembler::profile_null_seen(Register Rscratch1, Register Rscratch2) {
1645   if (ProfileInterpreter) {
1646     assert_different_registers(Rscratch1, Rscratch2);
1647     Label profile_continue;
1648 
1649     // If no method data exists, go to profile_continue.
1650     test_method_data_pointer(profile_continue);
1651 
1652     set_mdp_flag_at(BitData::null_seen_byte_constant(), Rscratch1);
1653 
1654     // The method data pointer needs to be updated.
1655     int mdp_delta = in_bytes(BitData::bit_data_size());
1656     if (TypeProfileCasts) {
1657       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1658     }
1659     update_mdp_by_constant(mdp_delta);
1660 
1661     bind (profile_continue);
1662   }
1663 }
1664 
1665 void InterpreterMacroAssembler::record_klass_in_profile(Register Rreceiver,
1666                                                         Register Rscratch1, Register Rscratch2) {
1667   assert(ProfileInterpreter, "must be profiling");
1668   assert_different_registers(Rreceiver, Rscratch1, Rscratch2);
1669 
1670   Label done;
1671   record_klass_in_profile_helper(Rreceiver, Rscratch1, Rscratch2, 0, done);
1672   bind (done);
1673 }
1674 
1675 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1676                                         Register receiver, Register scratch1, Register scratch2,
1677                                         int start_row, Label& done) {
1678   if (TypeProfileWidth == 0) {
1679     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1680     return;
1681   }
1682 
1683   int last_row = VirtualCallData::row_limit() - 1;
1684   assert(start_row <= last_row, "there must be work left to do");
1685   // Test this row for both the receiver and for null.
1686   // Take any of three different outcomes:
1687   //   1. found receiver => increment count and goto done
1688   //   2. found null => keep looking for case 1, maybe allocate this cell
1689   //   3. found something else => keep looking for cases 1 and 2
1690   // Case 3 is handled by a recursive call.
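  // Row layout being searched (a sketch): each row holds one receiver/count
  // cell pair, VirtualCallData::row_limit() rows in total:
  //   receiver[0], count[0], receiver[1], count[1], ...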
1691   for (int row = start_row; row <= last_row; row++) {
1692     Label next_test;
1693     bool test_for_null_also = (row == start_row);
1694 
1695     // See if the receiver is receiver[n].
1696     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1697     test_mdp_data_at(recvr_offset, receiver, next_test, scratch1);
1699 
1700     // The receiver is receiver[n]. Increment count[n].
1701     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1702     increment_mdp_data_at(count_offset, scratch1, scratch2);
1703     b(done);
1704     bind(next_test);
1705 
1706     if (test_for_null_also) {
1707       Label found_null;
1708       // Failed the equality check on receiver[n]... Test for null.
1709       if (start_row == last_row) {
1710         // The only thing left to do is handle the null case.
1711         // Scratch1 contains test_out from test_mdp_data_at.
1712         cmpdi(CCR0, scratch1, 0);
1713         beq(CCR0, found_null);
1714         // Receiver did not match any saved receiver and there is no empty row for it.
1715         // Increment total counter to indicate polymorphic case.
1716         increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1717         b(done);
1718         bind(found_null);
1719         break;
1720       }
1721       // Since null is rare, make it the branch-taken case.
1722       cmpdi(CCR0, scratch1, 0);
1723       beq(CCR0, found_null);
1724 
1725       // Put all the "Case 3" tests here.
1726       record_klass_in_profile_helper(receiver, scratch1, scratch2, start_row + 1, done);
1727 
1728       // Found a null. Keep searching for a matching receiver,
1729       // but remember that this is an empty (unused) slot.
1730       bind(found_null);
1731     }
1732   }
1733 
1734   // In the fall-through case, we found no matching receiver, but we
1735   // observed that receiver[start_row] is null.
1736 
1737   // Fill in the receiver field and increment the count.
1738   int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1739   set_mdp_data_at(recvr_offset, receiver);
1740   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1741   li(scratch1, DataLayout::counter_increment);
1742   set_mdp_data_at(count_offset, scratch1);
1743   if (start_row > 0) {
1744     b(done);
1745   }
1746 }
1747 
1748 // Argument and return type profiling.
1749 // kills: tmp, tmp2, R0, CR0, CR1
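// The profiled cell at (mdo_addr_base + mdo_addr_offs) combines a klass
// pointer with low-order flag bits (TypeEntries::null_seen,
// TypeEntries::type_unknown). The masking below compares only the klass part,
// so flag bits that are already set are preserved when the cell is rewritten.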
1750 void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
1751                                                  RegisterOrConstant mdo_addr_offs,
1752                                                  Register tmp, Register tmp2) {
1753   Label do_nothing, do_update;
1754 
1755   // tmp2 = obj is allowed
1756   assert_different_registers(obj, mdo_addr_base, tmp, R0);
1757   assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
1758   const Register klass = tmp2;
1759 
1760   verify_oop(obj);
1761 
1762   ld(tmp, mdo_addr_offs, mdo_addr_base);
1763 
1764   // Set null_seen if obj is 0.
1765   cmpdi(CCR0, obj, 0);
1766   ori(R0, tmp, TypeEntries::null_seen);
1767   beq(CCR0, do_update);
1768 
1769   load_klass(klass, obj);
1770 
1771   clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
1772   // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
1773   cmpd(CCR1, R0, klass);
1774   // Klass seen before, nothing to do (regardless of unknown bit).
1775   //beq(CCR1, do_nothing);
1776 
1777   andi_(R0, tmp, TypeEntries::type_unknown);
1778   // Already unknown. Nothing to do anymore.
1779   //bne(CCR0, do_nothing);
1780   crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
1781   beq(CCR0, do_nothing);
1782 
1783   clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
1784   orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
1785   beq(CCR0, do_update); // First time here. Set profile type.
1786 
1787   // Different than before. Cannot keep accurate profile.
1788   ori(R0, tmp, TypeEntries::type_unknown);
1789 
1790   bind(do_update);
1791   // update profile
1792   std(R0, mdo_addr_offs, mdo_addr_base);
1793 
1794   align(32, 12);
1795   bind(do_nothing);
1796 }
1797 
1798 void InterpreterMacroAssembler::profile_arguments_type(Register callee,
1799                                                        Register tmp1, Register tmp2,
1800                                                        bool is_virtual) {
1801   if (!ProfileInterpreter) {
1802     return;
1803   }
1804 
1805   assert_different_registers(callee, tmp1, tmp2, R28_mdx);
1806 
1807   if (MethodData::profile_arguments() || MethodData::profile_return()) {
1808     Label profile_continue;
1809 
1810     test_method_data_pointer(profile_continue);
1811 
1812     int off_to_start = is_virtual ?
1813       in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
1814 
1815     lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
1816     cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
1817     bne(CCR0, profile_continue);
1818 
1819     if (MethodData::profile_arguments()) {
1820       Label done;
1821       int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
1822       addi(R28_mdx, R28_mdx, off_to_args);
1823 
1824       for (int i = 0; i < TypeProfileArgsLimit; i++) {
1825         if (i > 0 || MethodData::profile_return()) {
1826           // If return value type is profiled we may have no argument to profile.
1827           ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
1828           cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
1829           addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
1830           blt(CCR0, done);
1831         }
1832         ld(tmp1, in_bytes(Method::const_offset()), callee);
1833         lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
1834         // Stack offset o (zero based) from the start of the argument
1835         // list, for n arguments translates into offset n - o - 1 from
1836         // the end of the argument list. But there's an extra slot at
1837         // the top of the stack. So the offset is n - o from R15_esp.
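        // Sketch of the resulting load below:
        //   value = *(R15_esp + (n - o) * Interpreter::stackElementSize)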
1838         ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
1839         subf(tmp1, tmp2, tmp1);
1840 
1841         sldi(tmp1, tmp1, Interpreter::logStackElementSize);
1842         ldx(tmp1, tmp1, R15_esp);
1843 
1844         profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);
1845 
1846         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1847         addi(R28_mdx, R28_mdx, to_add);
1848         off_to_args += to_add;
1849       }
1850 
1851       if (MethodData::profile_return()) {
1852         ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
1853         addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1854       }
1855 
1856       bind(done);
1857 
1858       if (MethodData::profile_return()) {
1859         // We're right after the type profile for the last
1860         // argument. tmp1 is the number of cells left in the
1861         // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
1862         // if there's a return to profile.
1863         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(),
1864                "can't move past ret type");
1865         sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
1866         add(R28_mdx, tmp1, R28_mdx);
1867       }
1868     } else {
1869       assert(MethodData::profile_return(), "either profile call args or call ret");
1870       update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
1871     }
1872 
1873     // Mdp points right after the end of the
1874     // CallTypeData/VirtualCallTypeData, right after the cells for the
1875     // return value type if there's one.
1876     align(32, 12);
1877     bind(profile_continue);
1878   }
1879 }
1880 
1881 void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
1882   assert_different_registers(ret, tmp1, tmp2);
1883   if (ProfileInterpreter && MethodData::profile_return()) {
1884     Label profile_continue;
1885 
1886     test_method_data_pointer(profile_continue);
1887 
1888     if (MethodData::profile_return_jsr292_only()) {
1889       // If we don't profile all invoke bytecodes we must make sure
1890       // it's a bytecode we indeed profile. We can't go back to the
1891       // beginning of the ProfileData we intend to update to check its
1892       // type because we're right after it and we don't know its
1893       // length.
1894       lbz(tmp1, 0, R14_bcp);
1895       lbz(tmp2, in_bytes(Method::intrinsic_id_offset()), R19_method);
1896       cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
1897       cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
1898       cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
1899       cmpwi(CCR1, tmp2, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1900       cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
1901       bne(CCR0, profile_continue);
1902     }
1903 
1904     profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);
1905 
1906     align(32, 12);
1907     bind(profile_continue);
1908   }
1909 }
1910 
1911 void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2,
1912                                                         Register tmp3, Register tmp4) {
1913   if (ProfileInterpreter && MethodData::profile_parameters()) {
1914     Label profile_continue, done;
1915 
1916     test_method_data_pointer(profile_continue);
1917 
1918     // Load the offset of the area within the MDO used for
1919     // parameters. If it's negative we're not profiling any parameters.
1920     lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
1921     cmpwi(CCR0, tmp1, 0);
1922     blt(CCR0, profile_continue);
1923 
1924     // Compute a pointer to the area for parameters from the offset
1925     // and move the pointer to the slot for the last
1926     // parameters. Collect profiling from last parameter down.
1927     // mdo start + parameters offset + array length - 1
1928 
1929     // Pointer to the parameter area in the MDO.
1930     const Register mdp = tmp1;
1931     add(mdp, tmp1, R28_mdx);
1932 
1933     // Offset of the current profile entry to update.
1934     const Register entry_offset = tmp2;
1935     // entry_offset = array len in number of cells
1936     ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);
1937 
1938     int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
1939     assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");
1940 
1941     // entry_offset (number of cells)  = array len - size of 1 entry + offset of the stack slot field
1942     addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
1943     // entry_offset in bytes
1944     sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
1945 
1946     Label loop;
1947     align(32, 12);
1948     bind(loop);
1949 
1950     // Load offset on the stack from the slot for this parameter.
1951     ld(tmp3, entry_offset, mdp);
1952     sldi(tmp3, tmp3, Interpreter::logStackElementSize);
1953     neg(tmp3, tmp3);
1954     // Read the parameter from the local area.
1955     ldx(tmp3, tmp3, R18_locals);
1956 
1957     // Make entry_offset now point to the type field for this parameter.
1958     int type_base = in_bytes(ParametersTypeData::type_offset(0));
1959     assert(type_base > off_base, "unexpected");
1960     addi(entry_offset, entry_offset, type_base - off_base);
1961 
1962     // Profile the parameter.
1963     profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);
1964 
1965     // Go to next parameter.
1966     int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
1967     cmpdi(CCR0, entry_offset, off_base + delta);
1968     addi(entry_offset, entry_offset, -delta);
1969     bge(CCR0, loop);
1970 
1971     align(32, 12);
1972     bind(profile_continue);
1973   }
1974 }
1975 
1976 // Add a monitor (see frame_ppc.hpp).
1977 void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {
1978 
1979   // Very-local scratch registers.
1980   const Register esp  = Rtemp1;
1981   const Register slot = Rtemp2;
1982 
1983   // Extracted monitor_size.
1984   int monitor_size = frame::interpreter_frame_monitor_size_in_bytes();
1985   assert(Assembler::is_aligned((unsigned int)monitor_size,
1986                                (unsigned int)frame::alignment_in_bytes),
1987          "size of a monitor must respect alignment of SP");
1988 
1989   resize_frame(-monitor_size, /*temp*/esp); // Allocate space for new monitor
1990   subf(Rtemp2, esp, R1_SP); // esp contains fp
1991   sradi(Rtemp2, Rtemp2, Interpreter::logStackElementSize);
1992   // Store relativized top_frame_sp
1993   std(Rtemp2, _ijava_state_neg(top_frame_sp), esp); // esp contains fp
1994 
1995   // Shuffle the expression stack down by monitor_size bytes to make room
1996   // for the new monitor: each slot between the current top of stack and
1997   // the monitor area is copied monitor_size bytes towards lower addresses.
1998   if (!stack_is_empty) {
1999     Label copy_slot, copy_slot_finished;
2000     const Register n_slots = slot;
2001 
2002     addi(esp, R15_esp, Interpreter::stackElementSize); // Point to first element (pre-pushed stack).
2003     subf(n_slots, esp, R26_monitor);
2004     srdi_(n_slots, n_slots, LogBytesPerWord);          // Compute number of slots to copy.
2005     assert(LogBytesPerWord == 3, "slot copy below assumes 8-byte stack slots");
2006     beq(CCR0, copy_slot_finished);                     // Nothing to copy.
2007 
2008     mtctr(n_slots);
2009 
2010     // loop
2011     bind(copy_slot);
2012     ld(slot, 0, esp);              // Move expression stack down.
2013     std(slot, -monitor_size, esp); // distance = monitor_size
2014     addi(esp, esp, BytesPerWord);
2015     bdnz(copy_slot);
2016 
2017     bind(copy_slot_finished);
2018   }
2019 
2020   addi(R15_esp, R15_esp, -monitor_size);
2021   addi(R26_monitor, R26_monitor, -monitor_size);
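  // At this point R15_esp and R26_monitor both point monitor_size bytes lower;
  // the new, still uninitialized monitor slot lies between the shifted
  // expression stack and the previously allocated monitors.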
2022 
2023   // Restart interpreter
2024 }
2025 
2026 // ============================================================================
2027 // Java locals access
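//
// Addressing sketch for the accessors below: local slot i lives at
// R18_locals - i * Interpreter::stackElementSize; category-2 values
// (long/double) are accessed at the second of their two slots, hence the
// additional -8 byte displacement.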
2028 
2029 // Load a local variable at index in Rindex into register Rdst_value.
2030 // Also puts address of local into Rdst_address as a service.
2031 // Kills:
2032 //   - Rdst_value
2033 //   - Rdst_address
2034 void InterpreterMacroAssembler::load_local_int(Register Rdst_value, Register Rdst_address, Register Rindex) {
2035   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2036   subf(Rdst_address, Rdst_address, R18_locals);
2037   lwz(Rdst_value, 0, Rdst_address);
2038 }
2039 
2040 // Load a local variable at index in Rindex into register Rdst_value.
2041 // Also puts address of local into Rdst_address as a service.
2042 // Kills:
2043 //   - Rdst_value
2044 //   - Rdst_address
2045 void InterpreterMacroAssembler::load_local_long(Register Rdst_value, Register Rdst_address, Register Rindex) {
2046   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2047   subf(Rdst_address, Rdst_address, R18_locals);
2048   ld(Rdst_value, -8, Rdst_address);
2049 }
2050 
2051 // Load a local variable at index in Rindex into register Rdst_value.
2052 // Also puts address of local into Rdst_address as a service.
2053 // Input:
2054 //   - Rindex:      slot nr of local variable
2055 // Kills:
2056 //   - Rdst_value
2057 //   - Rdst_address
2058 void InterpreterMacroAssembler::load_local_ptr(Register Rdst_value,
2059                                                Register Rdst_address,
2060                                                Register Rindex) {
2061   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2062   subf(Rdst_address, Rdst_address, R18_locals);
2063   ld(Rdst_value, 0, Rdst_address);
2064 }
2065 
2066 // Load a local variable at index in Rindex into register Rdst_value.
2067 // Also puts address of local into Rdst_address as a service.
2068 // Kills:
2069 //   - Rdst_value
2070 //   - Rdst_address
2071 void InterpreterMacroAssembler::load_local_float(FloatRegister Rdst_value,
2072                                                  Register Rdst_address,
2073                                                  Register Rindex) {
2074   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2075   subf(Rdst_address, Rdst_address, R18_locals);
2076   lfs(Rdst_value, 0, Rdst_address);
2077 }
2078 
2079 // Load a local variable at index in Rindex into register Rdst_value.
2080 // Also puts address of local into Rdst_address as a service.
2081 // Kills:
2082 //   - Rdst_value
2083 //   - Rdst_address
2084 void InterpreterMacroAssembler::load_local_double(FloatRegister Rdst_value,
2085                                                   Register Rdst_address,
2086                                                   Register Rindex) {
2087   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2088   subf(Rdst_address, Rdst_address, R18_locals);
2089   lfd(Rdst_value, -8, Rdst_address);
2090 }
2091 
2092 // Store an int value at local variable slot Rindex.
2093 // Kills:
2094 //   - Rindex
2095 void InterpreterMacroAssembler::store_local_int(Register Rvalue, Register Rindex) {
2096   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2097   subf(Rindex, Rindex, R18_locals);
2098   stw(Rvalue, 0, Rindex);
2099 }
2100 
2101 // Store a long value at local variable slot Rindex.
2102 // Kills:
2103 //   - Rindex
2104 void InterpreterMacroAssembler::store_local_long(Register Rvalue, Register Rindex) {
2105   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2106   subf(Rindex, Rindex, R18_locals);
2107   std(Rvalue, -8, Rindex);
2108 }
2109 
2110 // Store an oop value at local variable slot Rindex.
2111 // Kills:
2112 //   - Rindex
2113 void InterpreterMacroAssembler::store_local_ptr(Register Rvalue, Register Rindex) {
2114   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2115   subf(Rindex, Rindex, R18_locals);
2116   std(Rvalue, 0, Rindex);
2117 }
2118 
2119 // Store a float value at local variable slot Rindex.
2120 // Kills:
2121 //   - Rindex
2122 void InterpreterMacroAssembler::store_local_float(FloatRegister Rvalue, Register Rindex) {
2123   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2124   subf(Rindex, Rindex, R18_locals);
2125   stfs(Rvalue, 0, Rindex);
2126 }
2127 
2128 // Store a double value at local variable slot Rindex.
2129 // Kills:
2130 //   - Rindex
2131 void InterpreterMacroAssembler::store_local_double(FloatRegister Rvalue, Register Rindex) {
2132   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2133   subf(Rindex, Rindex, R18_locals);
2134   stfd(Rvalue, -8, Rindex);
2135 }
2136 
2137 // Read the pending exception from the thread and jump to the interpreter's
2138 // rethrow exception entry if one is pending. Fall through otherwise.
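// Sketch of the control flow:
//   if (thread->pending_exception() != nullptr) {
//     R3 = pending exception; clear it in the thread;
//     save_interpreter_state(); branch to Interpreter::rethrow_exception_entry();
//   }
//   // otherwise fall through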
2139 void InterpreterMacroAssembler::check_and_forward_exception(Register Rscratch1, Register Rscratch2) {
2140   assert_different_registers(Rscratch1, Rscratch2, R3);
2141   Register Rexception = Rscratch1;
2142   Register Rtmp       = Rscratch2;
2143   Label Ldone;
2144   // Get pending exception oop.
2145   ld(Rexception, thread_(pending_exception));
2146   cmpdi(CCR0, Rexception, 0);
2147   beq(CCR0, Ldone);
2148   li(Rtmp, 0);
2149   mr_if_needed(R3, Rexception);
2150   std(Rtmp, thread_(pending_exception)); // Clear exception in thread
2151   if (Interpreter::rethrow_exception_entry() != nullptr) {
2152     // Already got entry address.
2153     load_dispatch_table(Rtmp, (address*)Interpreter::rethrow_exception_entry());
2154   } else {
2155     // Dynamically load entry address.
2156     int simm16_rest = load_const_optimized(Rtmp, &Interpreter::_rethrow_exception_entry, R0, true);
2157     ld(Rtmp, simm16_rest, Rtmp);
2158   }
2159   mtctr(Rtmp);
2160   save_interpreter_state(Rtmp);
2161   bctr();
2162 
2163   align(32, 12);
2164   bind(Ldone);
2165 }
2166 
2167 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2168   save_interpreter_state(R11_scratch1);
2169 
2170   MacroAssembler::call_VM(oop_result, entry_point, false);
2171 
2172   restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
2173 
2174   check_and_handle_popframe(R11_scratch1);
2175   check_and_handle_earlyret(R11_scratch1);
2176   // Now check exceptions manually.
2177   if (check_exceptions) {
2178     check_and_forward_exception(R11_scratch1, R12_scratch2);
2179   }
2180 }
2181 
2182 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point,
2183                                         Register arg_1, bool check_exceptions) {
2184   // ARG1 is reserved for the thread.
2185   mr_if_needed(R4_ARG2, arg_1);
2186   call_VM(oop_result, entry_point, check_exceptions);
2187 }
2188 
2189 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point,
2190                                         Register arg_1, Register arg_2,
2191                                         bool check_exceptions) {
2192   // ARG1 is reserved for the thread.
2193   mr_if_needed(R4_ARG2, arg_1);
2194   assert(arg_2 != R4_ARG2, "smashed argument");
2195   mr_if_needed(R5_ARG3, arg_2);
2196   call_VM(oop_result, entry_point, check_exceptions);
2197 }
2198 
2199 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point,
2200                                         Register arg_1, Register arg_2, Register arg_3,
2201                                         bool check_exceptions) {
2202   // ARG1 is reserved for the thread.
2203   mr_if_needed(R4_ARG2, arg_1);
2204   assert(arg_2 != R4_ARG2, "smashed argument");
2205   mr_if_needed(R5_ARG3, arg_2);
2206   assert(arg_3 != R4_ARG2 && arg_3 != R5_ARG3, "smashed argument");
2207   mr_if_needed(R6_ARG4, arg_3);
2208   call_VM(oop_result, entry_point, check_exceptions);
2209 }
2210 
2211 void InterpreterMacroAssembler::save_interpreter_state(Register scratch) {
2212   ld(scratch, 0, R1_SP);
2213   subf(R0, scratch, R15_esp);
2214   sradi(R0, R0, Interpreter::logStackElementSize);
2215   std(R0, _ijava_state_neg(esp), scratch);
2216   std(R14_bcp, _ijava_state_neg(bcp), scratch);
2217   subf(R0, scratch, R26_monitor);
2218   sradi(R0, R0, Interpreter::logStackElementSize);
2219   std(R0, _ijava_state_neg(monitors), scratch);
2220   if (ProfileInterpreter) { std(R28_mdx, _ijava_state_neg(mdx), scratch); }
2221   // Other entries should be unchanged.
2222 }
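// Note: esp and monitors are saved relativized (distance from the frame
// pointer, i.e. the caller's SP, in stack elements); restore_interpreter_state
// below turns them back into absolute addresses.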
2223 
2224 void InterpreterMacroAssembler::restore_interpreter_state(Register scratch, bool bcp_and_mdx_only, bool restore_top_frame_sp) {
2225   ld_ptr(scratch, _abi0(callers_sp), R1_SP);   // Load frame pointer.
2226   if (restore_top_frame_sp) {
2227     // After thawing the top frame of a continuation we reach here with frame::java_abi.
2228     // Therefore we have to restore top_frame_sp before the assertion below.
2229     assert(!bcp_and_mdx_only, "choose other registers");
2230     Register tfsp = R18_locals;
2231     Register scratch2 = R26_monitor;
2232     ld(tfsp, _ijava_state_neg(top_frame_sp), scratch);
2233     // Derelativize top_frame_sp
2234     sldi(tfsp, tfsp, Interpreter::logStackElementSize);
2235     add(tfsp, tfsp, scratch);
2236     resize_frame_absolute(tfsp, scratch2, R0);
2237   }
2238   ld(R14_bcp, _ijava_state_neg(bcp), scratch); // Changed by VM code (exception).
2239   if (ProfileInterpreter) { ld(R28_mdx, _ijava_state_neg(mdx), scratch); } // Changed by VM code.
2240   if (!bcp_and_mdx_only) {
2241     // Following ones are Metadata.
2242     ld(R19_method, _ijava_state_neg(method), scratch);
2243     ld(R27_constPoolCache, _ijava_state_neg(cpoolCache), scratch);
2244     // Following ones are stack addresses and don't require reload.
2245     // Derelativize esp
2246     ld(R15_esp, _ijava_state_neg(esp), scratch);
2247     sldi(R15_esp, R15_esp, Interpreter::logStackElementSize);
2248     add(R15_esp, R15_esp, scratch);
2249     ld(R18_locals, _ijava_state_neg(locals), scratch);
2250     sldi(R18_locals, R18_locals, Interpreter::logStackElementSize);
2251     add(R18_locals, R18_locals, scratch);
2252     ld(R26_monitor, _ijava_state_neg(monitors), scratch);
2253     // Derelativize monitors
2254     sldi(R26_monitor, R26_monitor, Interpreter::logStackElementSize);
2255     add(R26_monitor, R26_monitor, scratch);
2256   }
2257 #ifdef ASSERT
2258   {
2259     Label Lok;
2260     subf(R0, R1_SP, scratch);
2261     cmpdi(CCR0, R0, frame::top_ijava_frame_abi_size + frame::ijava_state_size);
2262     bge(CCR0, Lok);
2263     stop("frame too small (restore istate)");
2264     bind(Lok);
2265   }
2266 #endif
2267 }
2268 
2269 void InterpreterMacroAssembler::get_method_counters(Register method,
2270                                                     Register Rcounters,
2271                                                     Label& skip) {
2272   BLOCK_COMMENT("Load and ev. allocate counter object {");
2273   Label has_counters;
2274   ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
2275   cmpdi(CCR0, Rcounters, 0);
2276   bne(CCR0, has_counters);
2277   call_VM(noreg, CAST_FROM_FN_PTR(address,
2278                                   InterpreterRuntime::build_method_counters), method);
2279   ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
2280   cmpdi(CCR0, Rcounters, 0);
2281   beq(CCR0, skip); // No MethodCounters, OutOfMemory.
2282   BLOCK_COMMENT("} Load and ev. allocate counter object");
2283 
2284   bind(has_counters);
2285 }
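// Typical use (a sketch): call get_method_counters() first and, unless it
// branched to `skip`, pass the resulting Rcounters to
// increment_invocation_counter() or increment_backedge_counter().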
2286 
2287 void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters,
2288                                                              Register iv_be_count,
2289                                                              Register Rtmp_r0) {
2290   assert(UseCompiler, "incrementing must be useful");
2291   Register invocation_count = iv_be_count;
2292   Register backedge_count   = Rtmp_r0;
2293   int delta = InvocationCounter::count_increment;
2294 
2295   // Byte offsets of the two counters within *Rcounters.
2298   int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() +
2299                                     InvocationCounter::counter_offset());
2300   int be_counter_offset  = in_bytes(MethodCounters::backedge_counter_offset() +
2301                                     InvocationCounter::counter_offset());
2302 
2303   BLOCK_COMMENT("Increment profiling counters {");
2304 
2305   // Load the backedge counter.
2306   lwz(backedge_count, be_counter_offset, Rcounters); // is unsigned int
2307   // Mask the backedge counter.
2308   andi(backedge_count, backedge_count, InvocationCounter::count_mask_value);
2309 
2310   // Load the invocation counter.
2311   lwz(invocation_count, inv_counter_offset, Rcounters); // is unsigned int
2312   // Add the delta to the invocation counter and store the result.
2313   addi(invocation_count, invocation_count, delta);
2314   // Store value.
2315   stw(invocation_count, inv_counter_offset, Rcounters);
2316 
2317   // Add invocation counter + backedge counter.
2318   add(iv_be_count, backedge_count, invocation_count);
2319 
2320   // Note that this macro must leave the backedge_count + invocation_count in
2321   // register iv_be_count!
2322   BLOCK_COMMENT("} Increment profiling counters");
2323 }
2324 
2325 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
2326   if (state == atos) { MacroAssembler::verify_oop(reg, FILE_AND_LINE); }
2327 }
2328 
2329 // Local helper function for the verify_oop_or_return_address macro.
2330 static bool verify_return_address(Method* m, int bci) {
2331 #ifndef PRODUCT
2332   address pc = (address)(m->constMethod()) + in_bytes(ConstMethod::codes_offset()) + bci;
2333   // Assume it is a valid return address if it is inside m and is preceded by a jsr.
2334   if (!m->contains(pc))                                            return false;
2335   address jsr_pc;
2336   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2337   if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())    return true;
2338   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2339   if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())    return true;
2340 #endif // PRODUCT
2341   return false;
2342 }
2343 
2344 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2345   if (VerifyFPU) {
2346     unimplemented("verfiyFPU");
2347   }
2348 }
2349 
2350 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2351   if (!VerifyOops) return;
2352 
2353   // The VM documentation for the astore[_wide] bytecode allows
2354   // the TOS to be not only an oop but also a return address.
2355   Label test;
2356   Label skip;
2357   // See if it is an address (in the current method):
2358 
2359   const int log2_bytecode_size_limit = 16;
2360   srdi_(Rtmp, reg, log2_bytecode_size_limit);
2361   bne(CCR0, test);
2362 
2363   address fd = CAST_FROM_FN_PTR(address, verify_return_address);
2364   const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
2365   save_volatile_gprs(R1_SP, -nbytes_save); // except R0
2366   save_LR_CR(Rtmp); // Save in old frame.
2367   push_frame_reg_args(nbytes_save, Rtmp);
2368 
2369   load_const_optimized(Rtmp, fd, R0);
2370   mr_if_needed(R4_ARG2, reg);
2371   mr(R3_ARG1, R19_method);
2372   call_c(Rtmp); // call C
2373 
2374   pop_frame();
2375   restore_LR_CR(Rtmp);
2376   restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
2377   b(skip);
2378 
2379   // Not a plausible bytecode index within the current method.
2380   // Verify it as an oop instead:
2381   bind(test);
2382   verify_oop(reg);
2383   bind(skip);
2384 }
2385 
2386 // Inline assembly for:
2387 //
2388 // if (thread is in interp_only_mode) {
2389 //   InterpreterRuntime::post_method_entry();
2390 // }
2391 // if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ) ||
2392 //     *jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2)   ) {
2393 //   SharedRuntime::jvmpi_method_entry(method, receiver);
2394 // }
2395 void InterpreterMacroAssembler::notify_method_entry() {
2396   // JVMTI
2397   // Whenever JVMTI puts a thread in interp_only_mode, method
2398   // entry/exit events are sent for that thread to track stack
2399   // depth. If it is possible to enter interp_only_mode we add
2400   // the code to check if the event should be sent.
2401   if (JvmtiExport::can_post_interpreter_events()) {
2402     Label jvmti_post_done;
2403 
2404     lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
2405     cmpwi(CCR0, R0, 0);
2406     beq(CCR0, jvmti_post_done);
2407     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2408 
2409     bind(jvmti_post_done);
2410   }
2411 }
2412 
2413 // Inline assembly for:
2414 //
2415 // if (thread is in interp_only_mode) {
2416 //   // save result
2417 //   InterpreterRuntime::post_method_exit();
2418 //   // restore result
2419 // }
2420 // if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT)) {
2421 //   // save result
2422 //   SharedRuntime::jvmpi_method_exit();
2423 //   // restore result
2424 // }
2425 //
2426 // Native methods have their result stored in d_tmp and l_tmp.
2427 // Java methods have their result stored in the expression stack.
2428 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosState state,
2429                                                    NotifyMethodExitMode mode, bool check_exceptions) {
2430   // JVMTI
2431   // Whenever JVMTI puts a thread in interp_only_mode, method
2432   // entry/exit events are sent for that thread to track stack
2433   // depth. If it is possible to enter interp_only_mode we add
2434   // the code to check if the event should be sent.
2435   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2436     Label jvmti_post_done;
2437 
2438     lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
2439     cmpwi(CCR0, R0, 0);
2440     beq(CCR0, jvmti_post_done);
2441     if (!is_native_method) { push(state); } // Expose tos to GC.
2442     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit), check_exceptions);
2443     if (!is_native_method) { pop(state); }
2444 
2445     align(32, 12);
2446     bind(jvmti_post_done);
2447   }
2448 
2449   // Dtrace support not implemented.
2450 }