1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "compiler/compiler_globals.hpp"
  26 #include "interp_masm_x86.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/interpreterRuntime.hpp"
  29 #include "logging/log.hpp"
  30 #include "oops/arrayOop.hpp"
  31 #include "oops/markWord.hpp"
  32 #include "oops/methodData.hpp"
  33 #include "oops/method.hpp"
  34 #include "oops/resolvedFieldEntry.hpp"
  35 #include "oops/resolvedIndyEntry.hpp"
  36 #include "oops/resolvedMethodEntry.hpp"
  37 #include "prims/jvmtiExport.hpp"
  38 #include "prims/jvmtiThreadState.hpp"
  39 #include "runtime/basicLock.hpp"
  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/javaThread.hpp"
  42 #include "runtime/runtimeUpcalls.hpp"
  43 #include "runtime/safepointMechanism.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "utilities/powerOfTwo.hpp"
  46 
  47 // Implementation of InterpreterMacroAssembler
  48 
  49 void InterpreterMacroAssembler::jump_to_entry(address entry) {
  50   assert(entry, "Entry must have been generated by now");
  51   jump(RuntimeAddress(entry));
  52 }
  53 
  54 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  55   Label update, next, none;
  56 
  57   assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
  58 
  59   interp_verify_oop(obj, atos);
  60 
  61   testptr(obj, obj);
  62   jccb(Assembler::notZero, update);
  63   testptr(mdo_addr, TypeEntries::null_seen);
  64   jccb(Assembler::notZero, next); // null already seen. Nothing to do anymore.
  65   // atomic update to prevent overwriting Klass* with 0
  66   lock();
  67   orptr(mdo_addr, TypeEntries::null_seen);
  68   jmpb(next);
  69 
  70   bind(update);
  71   load_klass(obj, obj, rscratch1);
  72   mov(rscratch1, obj);
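       // keep a copy of the klass in rscratch1; the xor below leaves set bits only
       // where obj's klass differs from what is already recorded in the mdo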
  73 
  74   xorptr(obj, mdo_addr);
  75   testptr(obj, TypeEntries::type_klass_mask);
  76   jccb(Assembler::zero, next); // klass seen before, nothing to
  77                                // do. The unknown bit may have been
  78                                // set already but no need to check.
  79 
  80   testptr(obj, TypeEntries::type_unknown);
  81   jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
  82 
  83   cmpptr(mdo_addr, 0);
  84   jccb(Assembler::equal, none);
  85   cmpptr(mdo_addr, TypeEntries::null_seen);
  86   jccb(Assembler::equal, none);
  87 
  88   // There is a chance that the checks above (re-reading profiling
  89   // data from memory) fail if another thread has just set the
  90   // profile data to this obj's klass
  91   mov(obj, rscratch1);
  92   xorptr(obj, mdo_addr);
  93   testptr(obj, TypeEntries::type_klass_mask);
  94   jccb(Assembler::zero, next);
  95 
  96   // different than before. Cannot keep accurate profile.
  97   orptr(mdo_addr, TypeEntries::type_unknown);
  98   jmpb(next);
  99 
 100   bind(none);
 101   // first time here. Set profile type.
 102   movptr(mdo_addr, obj);
 103 #ifdef ASSERT
 104   andptr(obj, TypeEntries::type_klass_mask);
 105   verify_klass_ptr(obj);
 106 #endif
 107 
 108   bind(next);
 109 }
 110 
 111 void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
 112   if (!ProfileInterpreter) {
 113     return;
 114   }
 115 
 116   if (MethodData::profile_arguments() || MethodData::profile_return()) {
 117     Label profile_continue;
 118 
 119     test_method_data_pointer(mdp, profile_continue);
 120 
 121     int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
 122 
 123     cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
 124     jcc(Assembler::notEqual, profile_continue);
 125 
 126     if (MethodData::profile_arguments()) {
 127       Label done;
 128       int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
 129       addptr(mdp, off_to_args);
 130 
 131       for (int i = 0; i < TypeProfileArgsLimit; i++) {
 132         if (i > 0 || MethodData::profile_return()) {
 133           // If return value type is profiled we may have no argument to profile
 134           movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
 135           subl(tmp, i*TypeStackSlotEntries::per_arg_count());
 136           cmpl(tmp, TypeStackSlotEntries::per_arg_count());
 137           jcc(Assembler::less, done);
 138         }
 139         movptr(tmp, Address(callee, Method::const_offset()));
 140         load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
 141         // stack offset o (zero based) from the start of the argument
 142         // list, for n arguments translates into offset n - o - 1 from
 143         // the end of the argument list
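             // (e.g. with n == 3 arguments, slot o == 0 maps to offset 2 from the end)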
 144         subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
 145         subl(tmp, 1);
 146         Address arg_addr = argument_address(tmp);
 147         movptr(tmp, arg_addr);
 148 
 149         Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
 150         profile_obj_type(tmp, mdo_arg_addr);
 151 
 152         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
 153         addptr(mdp, to_add);
 154         off_to_args += to_add;
 155       }
 156 
 157       if (MethodData::profile_return()) {
 158         movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
 159         subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
 160       }
 161 
 162       bind(done);
 163 
 164       if (MethodData::profile_return()) {
 165         // We're right after the type profile for the last
 166         // argument. tmp is the number of cells left in the
 167         // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
 168         // if there's a return to profile.
 169         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
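             // scale the remaining cell count to bytes and advance mdp past those cells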
 170         shll(tmp, log2i_exact((int)DataLayout::cell_size));
 171         addptr(mdp, tmp);
 172       }
 173       movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
 174     } else {
 175       assert(MethodData::profile_return(), "either profile call args or call ret");
 176       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
 177     }
 178 
 179     // mdp points right after the end of the
 180     // CallTypeData/VirtualCallTypeData, right after the cells for the
 181     // return value type if there's one
 182 
 183     bind(profile_continue);
 184   }
 185 }
 186 
 187 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
 188   assert_different_registers(mdp, ret, tmp, _bcp_register);
 189   if (ProfileInterpreter && MethodData::profile_return()) {
 190     Label profile_continue;
 191 
 192     test_method_data_pointer(mdp, profile_continue);
 193 
 194     if (MethodData::profile_return_jsr292_only()) {
 195       assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
 196 
 197       // If we don't profile all invoke bytecodes we must make sure
 198       // it's a bytecode we indeed profile. We can't go back to the
 199       // beginning of the ProfileData we intend to update to check its
 200       // type because we're right after it and we don't know its
 201       // length
 202       Label do_profile;
 203       cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
 204       jcc(Assembler::equal, do_profile);
 205       cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
 206       jcc(Assembler::equal, do_profile);
 207       get_method(tmp);
 208       cmpw(Address(tmp, Method::intrinsic_id_offset()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
 209       jcc(Assembler::notEqual, profile_continue);
 210 
 211       bind(do_profile);
 212     }
 213 
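         // mdp was left pointing just past the return type cells (see
         // profile_arguments_type), so the ReturnTypeEntry is addressed below
         // with a negative offset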
 214     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
 215     mov(tmp, ret);
 216     profile_obj_type(tmp, mdo_ret_addr);
 217 
 218     bind(profile_continue);
 219   }
 220 }
 221 
 222 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
 223   if (ProfileInterpreter && MethodData::profile_parameters()) {
 224     Label profile_continue;
 225 
 226     test_method_data_pointer(mdp, profile_continue);
 227 
 228     // Load the offset of the area within the MDO used for
 229     // parameters. If it's negative we're not profiling any parameters
 230     movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
 231     testl(tmp1, tmp1);
 232     jcc(Assembler::negative, profile_continue);
 233 
 234     // Compute a pointer to the area for parameters from the offset
 235     // and move the pointer to the slot for the last
 236     // parameters. Collect profiling from last parameter down.
 237     // mdo start + parameters offset + array length - 1
 238     addptr(mdp, tmp1);
 239     movptr(tmp1, Address(mdp, ArrayData::array_len_offset()));
 240     decrement(tmp1, TypeStackSlotEntries::per_arg_count());
 241 
 242     Label loop;
 243     bind(loop);
 244 
 245     int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
 246     int type_base = in_bytes(ParametersTypeData::type_offset(0));
 247     Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size);
 248     Address arg_off(mdp, tmp1, per_arg_scale, off_base);
 249     Address arg_type(mdp, tmp1, per_arg_scale, type_base);
 250 
 251     // load offset on the stack from the slot for this parameter
 252     movptr(tmp2, arg_off);
 253     negptr(tmp2);
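         // locals live at decreasing addresses from _locals_register, so the slot
         // offset is negated before being used as an index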
 254     // read the parameter from the local area
 255     movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale()));
 256 
 257     // profile the parameter
 258     profile_obj_type(tmp2, arg_type);
 259 
 260     // go to next parameter
 261     decrement(tmp1, TypeStackSlotEntries::per_arg_count());
 262     jcc(Assembler::positive, loop);
 263 
 264     bind(profile_continue);
 265   }
 266 }
 267 
 268 void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
 269                                                   int number_of_arguments) {
 270   // interpreter specific
 271   //
 272   // Note: No need to save/restore bcp & locals registers
 273   //       since these are callee saved registers and no blocking/
 274   //       GC can happen in leaf calls.
 275   // Further Note: DO NOT save/restore bcp/locals. If a caller has
 276   // already saved them so that it can use rsi/rdi as temporaries,
 277   // then a save/restore here will DESTROY the copy the caller
 278   // saved! There used to be a save_bcp() that only happened in
 279   // the ASSERT path (no restore_bcp), which caused bizarre failures
 280   // when the JVM was built with ASSERTs.
 281 #ifdef ASSERT
 282   {
 283     Label L;
 284     cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
 285     jcc(Assembler::equal, L);
 286     stop("InterpreterMacroAssembler::call_VM_leaf_base:"
 287          " last_sp != null");
 288     bind(L);
 289   }
 290 #endif
 291   // super call
 292   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
 293   // interpreter specific
 294   // LP64: Used to ASSERT that r13/r14 were equal to frame's bcp/locals,
 295   // but since they may not have been saved (and we don't want to
 296   // save them here; see note above) the assert is invalid.
 297 }
 298 
 299 void InterpreterMacroAssembler::call_VM_base(Register oop_result,
 300                                              Register last_java_sp,
 301                                              address  entry_point,
 302                                              int      number_of_arguments,
 303                                              bool     check_exceptions) {
 304   // interpreter specific
 305   //
 306   // Note: Could avoid restoring locals ptr (callee saved) - however it doesn't
 307   //       really make a difference for these runtime calls, since they are
 308   //       slow anyway. Btw., bcp must be saved/restored since it may change
 309   //       due to GC.
 310   save_bcp();
 311 #ifdef ASSERT
 312   {
 313     Label L;
 314     cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
 315     jcc(Assembler::equal, L);
 316     stop("InterpreterMacroAssembler::call_VM_base:"
 317          " last_sp isn't null");
 318     bind(L);
 319   }
 320 #endif /* ASSERT */
 321   // super call
 322   MacroAssembler::call_VM_base(oop_result, last_java_sp,
 323                                entry_point, number_of_arguments,
 324                                check_exceptions);
 325   // interpreter specific
 326   restore_bcp();
 327   restore_locals();
 328 }
 329 
 330 void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
 331                                                     address entry_point,
 332                                                     Register arg_1) {
 333   assert(arg_1 == c_rarg1, "");
 334   Label resume_pc, not_preempted;
 335 
 336 #ifdef ASSERT
 337   {
 338     Label L;
 339     cmpptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
 340     jcc(Assembler::equal, L);
 341     stop("Should not have alternate return address set");
 342     bind(L);
 343   }
 344 #endif /* ASSERT */
 345 
 346   // Force freeze slow path.
 347   push_cont_fastpath();
 348 
 349   // Make VM call. In case of preemption set last_pc to the one we want to resume to.
 350   // Note: call_VM_helper requires last_Java_pc for anchor to be at the top of the stack.
 351   lea(rscratch1, resume_pc);
 352   push(rscratch1);
 353   MacroAssembler::call_VM_helper(oop_result, entry_point, 1, false /*check_exceptions*/);
 354   pop(rscratch1);
 355 
 356   pop_cont_fastpath();
 357 
 358   // Check if preempted.
 359   movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset()));
 360   cmpptr(rscratch1, NULL_WORD);
 361   jccb(Assembler::zero, not_preempted);
 362   movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
 363   jmp(rscratch1);
 364 
 365   // In case of preemption, this is where we will resume once we finally acquire the monitor.
 366   bind(resume_pc);
 367   restore_after_resume(false /* is_native */);
 368 
 369   bind(not_preempted);
 370 }
 371 
 372 void InterpreterMacroAssembler::restore_after_resume(bool is_native) {
 373   lea(rscratch1, ExternalAddress(Interpreter::cont_resume_interpreter_adapter()));
 374   call(rscratch1);
 375   if (is_native) {
 376     // On resume we need to set up stack as expected.
 377     push(dtos);
 378     push(ltos);
 379   }
 380 }
 381 
 382 void InterpreterMacroAssembler::check_and_handle_popframe() {
 383   if (JvmtiExport::can_pop_frame()) {
 384     Label L;
 385     // Initiate popframe handling only if it is not already being
 386     // processed.  If the flag has the popframe_processing bit set, it
 387     // means that this code is called *during* popframe handling - we
 388     // don't want to reenter.
 389     // This method is only called just after the call into the vm in
 390     // call_VM_base, so the arg registers are available.
 391     Register pop_cond = c_rarg0;
 392     movl(pop_cond, Address(r15_thread, JavaThread::popframe_condition_offset()));
 393     testl(pop_cond, JavaThread::popframe_pending_bit);
 394     jcc(Assembler::zero, L);
 395     testl(pop_cond, JavaThread::popframe_processing_bit);
 396     jcc(Assembler::notZero, L);
 397     // Call Interpreter::remove_activation_preserving_args_entry() to get the
 398     // address of the same-named entrypoint in the generated interpreter code.
 399     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
 400     jmp(rax);
 401     bind(L);
 402   }
 403 }
 404 
 405 void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
 406   movptr(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
 407   const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
 408   const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
 409   const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
 410 
 411   switch (state) {
 412     case atos: movptr(rax, oop_addr);
 413                movptr(oop_addr, NULL_WORD);
 414                interp_verify_oop(rax, state);         break;
 415     case ltos: movptr(rax, val_addr);                 break;
 416     case btos:                                   // fall through
 417     case ztos:                                   // fall through
 418     case ctos:                                   // fall through
 419     case stos:                                   // fall through
 420     case itos: movl(rax, val_addr);                 break;
 421     case ftos: load_float(val_addr);                break;
 422     case dtos: load_double(val_addr);               break;
 423     case vtos: /* nothing to do */                  break;
 424     default  : ShouldNotReachHere();
 425   }
 426 
 427   // Clean up tos value in the thread object
 428   movl(tos_addr, ilgl);
 429   movptr(val_addr, NULL_WORD);
 430 }
 431 
 432 
 433 void InterpreterMacroAssembler::check_and_handle_earlyret() {
 434   if (JvmtiExport::can_force_early_return()) {
 435     Label L;
 436     Register tmp = c_rarg0;
 437     Register rthread = r15_thread;
 438 
 439     movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
 440     testptr(tmp, tmp);
 441     jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == nullptr) exit;
 442 
 443     // Initiate earlyret handling only if it is not already being processed.
 444     // If the flag has the earlyret_processing bit set, it means that this code
 445     // is called *during* earlyret handling - we don't want to reenter.
 446     movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset()));
 447     cmpl(tmp, JvmtiThreadState::earlyret_pending);
 448     jcc(Assembler::notEqual, L);
 449 
 450     // Call Interpreter::remove_activation_early_entry() to get the address of the
 451     // same-named entrypoint in the generated interpreter code.
 452     movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
 453     movl(tmp, Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
 454     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), tmp);
 455     jmp(rax);
 456     bind(L);
 457   }
 458 }
 459 
 460 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
 461   assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
 462   load_unsigned_short(reg, Address(_bcp_register, bcp_offset));
 463   bswapl(reg);
 464   shrl(reg, 16);
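       // (the u2 operand is big-endian in the bytecode stream: bytes 0x12 0x34 load
       // little-endian as 0x3412, bswapl gives 0x12340000, and the shift leaves 0x1234)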
 465 }
 466 
 467 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
 468                                                        int bcp_offset,
 469                                                        size_t index_size) {
 470   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 471   if (index_size == sizeof(u2)) {
 472     load_unsigned_short(index, Address(_bcp_register, bcp_offset));
 473   } else if (index_size == sizeof(u4)) {
 474     movl(index, Address(_bcp_register, bcp_offset));
 475   } else if (index_size == sizeof(u1)) {
 476     load_unsigned_byte(index, Address(_bcp_register, bcp_offset));
 477   } else {
 478     ShouldNotReachHere();
 479   }
 480 }
 481 
 482 // Load object from cpool->resolved_references(index)
 483 void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result,
 484                                                                  Register index,
 485                                                                  Register tmp) {
 486   assert_different_registers(result, index);
 487 
 488   get_constant_pool(result);
 489   // load pointer for resolved_references[] objArray
 490   movptr(result, Address(result, ConstantPool::cache_offset()));
 491   movptr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
 492   resolve_oop_handle(result, tmp);
 493   load_heap_oop(result, Address(result, index,
 494                                 UseCompressedOops ? Address::times_4 : Address::times_ptr,
 495                                 arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp);
 496 }
 497 
 498 // load cpool->resolved_klass_at(index)
 499 void InterpreterMacroAssembler::load_resolved_klass_at_index(Register klass,
 500                                                              Register cpool,
 501                                                              Register index) {
 502   assert_different_registers(cpool, index);
 503 
 504   movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
 505   Register resolved_klasses = cpool;
 506   movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset()));
 507   movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
 508 }
 509 
 510 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 511 // subtype of super_klass.
 512 //
 513 // Args:
 514 //      rax: superklass
 515 //      Rsub_klass: subklass
 516 //
 517 // Kills:
 518 //      rcx, rdi
 519 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 520                                                   Label& ok_is_subtype) {
 521   assert(Rsub_klass != rax, "rax holds superklass");
 522   LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
 523   LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
 524   assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
 525   assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
 526 
 527   // Profile the not-null value's klass.
 528   profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
 529 
 530   // Do the check.
 531   check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
 532 }
 533 
 534 
 535 // Java Expression Stack
 536 
 537 void InterpreterMacroAssembler::pop_ptr(Register r) {
 538   pop(r);
 539 }
 540 
 541 void InterpreterMacroAssembler::push_ptr(Register r) {
 542   push(r);
 543 }
 544 
 545 void InterpreterMacroAssembler::push_i(Register r) {
 546   push(r);
 547 }
 548 
 549 void InterpreterMacroAssembler::push_i_or_ptr(Register r) {
 550   push(r);
 551 }
 552 
 553 void InterpreterMacroAssembler::push_f(XMMRegister r) {
 554   subptr(rsp, wordSize);
 555   movflt(Address(rsp, 0), r);
 556 }
 557 
 558 void InterpreterMacroAssembler::pop_f(XMMRegister r) {
 559   movflt(r, Address(rsp, 0));
 560   addptr(rsp, wordSize);
 561 }
 562 
 563 void InterpreterMacroAssembler::push_d(XMMRegister r) {
 564   subptr(rsp, 2 * wordSize);
 565   movdbl(Address(rsp, 0), r);
 566 }
 567 
 568 void InterpreterMacroAssembler::pop_d(XMMRegister r) {
 569   movdbl(r, Address(rsp, 0));
 570   addptr(rsp, 2 * Interpreter::stackElementSize);
 571 }
 572 
 573 void InterpreterMacroAssembler::pop_i(Register r) {
 574   // XXX can't use pop currently, upper half is not clean
 575   movl(r, Address(rsp, 0));
 576   addptr(rsp, wordSize);
 577 }
 578 
 579 void InterpreterMacroAssembler::pop_l(Register r) {
 580   movq(r, Address(rsp, 0));
 581   addptr(rsp, 2 * Interpreter::stackElementSize);
 582 }
 583 
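     // A long occupies two expression stack slots; the value goes in the first slot
     // and the second slot is cleared.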
 584 void InterpreterMacroAssembler::push_l(Register r) {
 585   subptr(rsp, 2 * wordSize);
 586   movptr(Address(rsp, Interpreter::expr_offset_in_bytes(0)), r         );
 587   movptr(Address(rsp, Interpreter::expr_offset_in_bytes(1)), NULL_WORD );
 588 }
 589 
 590 void InterpreterMacroAssembler::pop(TosState state) {
 591   switch (state) {
 592   case atos: pop_ptr();                 break;
 593   case btos:
 594   case ztos:
 595   case ctos:
 596   case stos:
 597   case itos: pop_i();                   break;
 598   case ltos: pop_l();                   break;
 599   case ftos: pop_f(xmm0);               break;
 600   case dtos: pop_d(xmm0);               break;
 601   case vtos: /* nothing to do */        break;
 602   default:   ShouldNotReachHere();
 603   }
 604   interp_verify_oop(rax, state);
 605 }
 606 
 607 void InterpreterMacroAssembler::push(TosState state) {
 608   interp_verify_oop(rax, state);
 609   switch (state) {
 610   case atos: push_ptr();                break;
 611   case btos:
 612   case ztos:
 613   case ctos:
 614   case stos:
 615   case itos: push_i();                  break;
 616   case ltos: push_l();                  break;
 617   case ftos: push_f(xmm0);              break;
 618   case dtos: push_d(xmm0);              break;
 619   case vtos: /* nothing to do */        break;
 620   default  : ShouldNotReachHere();
 621   }
 622 }
 623 
 624 // Helpers for swap and dup
 625 void InterpreterMacroAssembler::load_ptr(int n, Register val) {
 626   movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
 627 }
 628 
 629 void InterpreterMacroAssembler::store_ptr(int n, Register val) {
 630   movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
 631 }
 632 
 633 
 634 void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
 635   // set sender sp
 636   lea(_bcp_register, Address(rsp, wordSize));
 637   // record last_sp
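       // (last_sp is stored relativized: a word offset from rbp, not an absolute address)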
 638   mov(rcx, _bcp_register);
 639   subptr(rcx, rbp);
 640   sarptr(rcx, LogBytesPerWord);
 641   movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rcx);
 642 }
 643 
 644 
 645 // Jump to the from_interpreted entry of a call unless single stepping is possible
 646 // in this thread, in which case we must call the i2i entry
 647 void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
 648   prepare_to_jump_from_interpreted();
 649 
 650   if (JvmtiExport::can_post_interpreter_events()) {
 651     Label run_compiled_code;
 652     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 653     // compiled code in threads for which the event is enabled.  Check here for
 654     // interp_only_mode if these events CAN be enabled.
 655     // interp_only is an int; on little-endian it is sufficient to test the low byte only
 656     // Is a cmpl faster?
 657     cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
 658     jccb(Assembler::zero, run_compiled_code);
 659     jmp(Address(method, Method::interpreter_entry_offset()));
 660     bind(run_compiled_code);
 661   }
 662 
 663   jmp(Address(method, Method::from_interpreted_offset()));
 664 }
 665 
 666 // The following two routines provide a hook so that an implementation
 667 // can schedule the dispatch in two parts.  x86 does not do this.
 668 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
 669   // Nothing x86 specific to be done here
 670 }
 671 
 672 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
 673   dispatch_next(state, step);
 674 }
 675 
 676 void InterpreterMacroAssembler::dispatch_base(TosState state,
 677                                               address* table,
 678                                               bool verifyoop,
 679                                               bool generate_poll) {
 680   if (VerifyActivationFrameSize) {
 681     Label L;
 682     mov(rcx, rbp);
 683     subptr(rcx, rsp);
 684     int32_t min_frame_size =
 685       (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
 686       wordSize;
 687     cmpptr(rcx, min_frame_size);
 688     jcc(Assembler::greaterEqual, L);
 689     stop("broken stack frame");
 690     bind(L);
 691   }
 692   if (verifyoop) {
 693     interp_verify_oop(rax, state);
 694   }
 695 
 696   address* const safepoint_table = Interpreter::safept_table(state);
 697   Label no_safepoint, dispatch;
 698   if (table != safepoint_table && generate_poll) {
 699     NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
 700     testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
 701 
 702     jccb(Assembler::zero, no_safepoint);
 703     lea(rscratch1, ExternalAddress((address)safepoint_table));
 704     jmpb(dispatch);
 705   }
 706 
 707   bind(no_safepoint);
 708   lea(rscratch1, ExternalAddress((address)table));
 709   bind(dispatch);
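       // rbx holds the zero-extended bytecode; use it to index the table of 8-byte
       // entry point addresses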
 710   jmp(Address(rscratch1, rbx, Address::times_8));
 711 }
 712 
 713 void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
 714   dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
 715 }
 716 
 717 void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
 718   dispatch_base(state, Interpreter::normal_table(state));
 719 }
 720 
 721 void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
 722   dispatch_base(state, Interpreter::normal_table(state), false);
 723 }
 724 
 725 
 726 void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
 727   // load next bytecode (load before advancing _bcp_register to prevent AGI)
 728   load_unsigned_byte(rbx, Address(_bcp_register, step));
 729   // advance _bcp_register
 730   increment(_bcp_register, step);
 731   dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
 732 }
 733 
 734 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
 735   // load current bytecode
 736   load_unsigned_byte(rbx, Address(_bcp_register, 0));
 737   dispatch_base(state, table);
 738 }
 739 
 740 void InterpreterMacroAssembler::narrow(Register result) {
 741 
 742   // Get method->_constMethod->_result_type
 743   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
 744   movptr(rcx, Address(rcx, Method::const_offset()));
 745   load_unsigned_byte(rcx, Address(rcx, ConstMethod::result_type_offset()));
 746 
 747   Label done, notBool, notByte, notChar;
 748 
 749   // common case first
 750   cmpl(rcx, T_INT);
 751   jcc(Assembler::equal, done);
 752 
 753   // mask integer result to narrower return type.
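       // (e.g. a T_BOOLEAN result keeps only bit 0; a T_BYTE result of 0x80
       // sign-extends to 0xffffff80)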
 754   cmpl(rcx, T_BOOLEAN);
 755   jcc(Assembler::notEqual, notBool);
 756   andl(result, 0x1);
 757   jmp(done);
 758 
 759   bind(notBool);
 760   cmpl(rcx, T_BYTE);
 761   jcc(Assembler::notEqual, notByte);
 762   movsbl(result, result);
 763   jmp(done);
 764 
 765   bind(notByte);
 766   cmpl(rcx, T_CHAR);
 767   jcc(Assembler::notEqual, notChar);
 768   movzwl(result, result);
 769   jmp(done);
 770 
 771   bind(notChar);
 772   // cmpl(rcx, T_SHORT);  // all that's left
 773   // jcc(Assembler::notEqual, done);
 774   movswl(result, result);
 775 
 776   // Nothing to do for T_INT
 777   bind(done);
 778 }
 779 
 780 // remove activation
 781 //
 782 // Apply stack watermark barrier.
 783 // Unlock the receiver if this is a synchronized method.
 784 // Unlock any Java monitors from synchronized blocks.
 785 // Remove the activation from the stack.
 786 //
 787 // If there are locked Java monitors
 788 //    If throw_monitor_exception
 789 //       throws IllegalMonitorStateException
 790 //    Else if install_monitor_exception
 791 //       installs IllegalMonitorStateException
 792 //    Else
 793 //       no error processing
 794 void InterpreterMacroAssembler::remove_activation(
 795         TosState state,
 796         Register ret_addr,
 797         bool throw_monitor_exception,
 798         bool install_monitor_exception,
 799         bool notify_jvmdi) {
 800   // Note: Registers rdx and xmm0 may be in use for the
 801   // result if this is a synchronized method
 802   Label unlocked, unlock, no_unlock;
 803 
 804   const Register rthread = r15_thread;
 805   const Register robj    = c_rarg1;
 806   const Register rmon    = c_rarg1;
 807 
 808   // The poll below is for the stack watermark barrier. It allows frames that would
 809   // normally not be safe to use to be fixed up lazily. Such bad returns into unsafe
 810   // territory of the stack will call InterpreterRuntime::at_unwind.
 811   Label slow_path;
 812   Label fast_path;
 813   safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
 814   jmp(fast_path);
 815   bind(slow_path);
 816   push(state);
 817   set_last_Java_frame(noreg, rbp, (address)pc(), rscratch1);
 818   super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
 819   reset_last_Java_frame(true);
 820   pop(state);
 821   bind(fast_path);
 822 
 823   // get the value of _do_not_unlock_if_synchronized into rdx
 824   const Address do_not_unlock_if_synchronized(rthread,
 825     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
 826   movbool(rbx, do_not_unlock_if_synchronized);
 827   movbool(do_not_unlock_if_synchronized, false); // reset the flag
 828 
 829   // get method access flags
 830   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
 831   load_unsigned_short(rcx, Address(rcx, Method::access_flags_offset()));
 832   testl(rcx, JVM_ACC_SYNCHRONIZED);
 833   jcc(Assembler::zero, unlocked);
 834 
 835   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
 836   // is set.
 837   testbool(rbx);
 838   jcc(Assembler::notZero, no_unlock);
 839 
 840   // unlock monitor
 841   push(state); // save result
 842 
 843   // The BasicObjectLock will be first in the list, since this is a
 844   // synchronized method. However, we need to check that the object has
 845   // not been unlocked by an explicit monitorexit bytecode.
 846   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
 847                         wordSize - (int) sizeof(BasicObjectLock));
 848   // We use c_rarg1/rdx so that if we go slow path it will be the correct
 849   // register for unlock_object to pass to VM directly
 850   lea(robj, monitor); // address of first monitor
 851 
 852   movptr(rax, Address(robj, BasicObjectLock::obj_offset()));
 853   testptr(rax, rax);
 854   jcc(Assembler::notZero, unlock);
 855 
 856   pop(state);
 857   if (throw_monitor_exception) {
 858     // Entry already unlocked, need to throw exception
 859     call_VM(noreg, CAST_FROM_FN_PTR(address,
 860                    InterpreterRuntime::throw_illegal_monitor_state_exception));
 861     should_not_reach_here();
 862   } else {
 863     // Monitor already unlocked during a stack unroll. If requested,
 864     // install an illegal_monitor_state_exception.  Continue with
 865     // stack unrolling.
 866     if (install_monitor_exception) {
 867       call_VM(noreg, CAST_FROM_FN_PTR(address,
 868                      InterpreterRuntime::new_illegal_monitor_state_exception));
 869     }
 870     jmp(unlocked);
 871   }
 872 
 873   bind(unlock);
 874   unlock_object(robj);
 875   pop(state);
 876 
 877   // Check for block-structured locking (i.e., that all locked
 878   // objects have been unlocked)
 879   bind(unlocked);
 880 
 881   // rax, rdx: Might contain return value
 882 
 883   // Check that all monitors are unlocked
 884   {
 885     Label loop, exception, entry, restart;
 886     const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
 887     const Address monitor_block_top(
 888         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
 889     const Address monitor_block_bot(
 890         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
 891 
 892     bind(restart);
 893     // We use c_rarg1 so that if we go slow path it will be the correct
 894     // register for unlock_object to pass to VM directly
 895     movptr(rmon, monitor_block_top); // derelativize pointer
 896     lea(rmon, Address(rbp, rmon, Address::times_ptr));
 897     // c_rarg1 points to current entry, starting with top-most entry
 898 
 899     lea(rbx, monitor_block_bot);  // points to word before bottom of
 900                                   // monitor block
 901     jmp(entry);
 902 
 903     // Entry already locked, need to throw exception
 904     bind(exception);
 905 
 906     if (throw_monitor_exception) {
 907       // Throw exception
 908       MacroAssembler::call_VM(noreg,
 909                               CAST_FROM_FN_PTR(address, InterpreterRuntime::
 910                                    throw_illegal_monitor_state_exception));
 911       should_not_reach_here();
 912     } else {
 913       // Stack unrolling. Unlock object and install illegal_monitor_exception.
 914       // Unlock does not block, so don't have to worry about the frame.
 915       // We don't have to preserve c_rarg1 since we are going to throw an exception.
 916 
 917       push(state);
 918       mov(robj, rmon);   // nop if robj and rmon are the same
 919       unlock_object(robj);
 920       pop(state);
 921 
 922       if (install_monitor_exception) {
 923         call_VM(noreg, CAST_FROM_FN_PTR(address,
 924                                         InterpreterRuntime::
 925                                         new_illegal_monitor_state_exception));
 926       }
 927 
 928       jmp(restart);
 929     }
 930 
 931     bind(loop);
 932     // check if current entry is used
 933     cmpptr(Address(rmon, BasicObjectLock::obj_offset()), NULL_WORD);
 934     jcc(Assembler::notEqual, exception);
 935 
 936     addptr(rmon, entry_size); // otherwise advance to next entry
 937     bind(entry);
 938     cmpptr(rmon, rbx); // check if bottom reached
 939     jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
 940   }
 941 
 942   bind(no_unlock);
 943 
 944   // jvmti support
 945   if (notify_jvmdi) {
 946     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 947   } else {
 948     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 949   }
 950 
 951   // remove activation
 952   // get sender sp
 953   movptr(rbx,
 954          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
 955   if (StackReservedPages > 0) {
 956     // testing if reserved zone needs to be re-enabled
 957     Register rthread = r15_thread;
 958     Label no_reserved_zone_enabling;
 959 
 960     // check if already enabled - if so no re-enabling needed
 961     assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
 962     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
 963     jcc(Assembler::equal, no_reserved_zone_enabling);
 964 
 965     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 966     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
 967 
 968     call_VM_leaf(
 969       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 970     call_VM(noreg, CAST_FROM_FN_PTR(address,
 971                    InterpreterRuntime::throw_delayed_StackOverflowError));
 972     should_not_reach_here();
 973 
 974     bind(no_reserved_zone_enabling);
 975   }
 976   leave();                           // remove frame anchor
 977   pop(ret_addr);                     // get return address
 978   mov(rsp, rbx);                     // set sp to sender sp
 979   pop_cont_fastpath();
 980 }
 981 
 982 void InterpreterMacroAssembler::get_method_counters(Register method,
 983                                                     Register mcs, Label& skip) {
 984   Label has_counters;
 985   movptr(mcs, Address(method, Method::method_counters_offset()));
 986   testptr(mcs, mcs);
 987   jcc(Assembler::notZero, has_counters);
 988   call_VM(noreg, CAST_FROM_FN_PTR(address,
 989           InterpreterRuntime::build_method_counters), method);
 990   movptr(mcs, Address(method,Method::method_counters_offset()));
 991   testptr(mcs, mcs);
 992   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
 993   bind(has_counters);
 994 }
 995 
 996 
 997 // Lock object
 998 //
 999 // Args:
1000 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1001 //
1002 // Kills:
1003 //      rax, rbx
1004 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1005   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
1006 
1007   if (LockingMode == LM_MONITOR) {
1008     call_VM_preemptable(noreg,
1009             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1010             lock_reg);
1011   } else {
1012     Label count_locking, done, slow_case;
1013 
1014     const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1015     const Register tmp_reg = rbx;
1016     const Register obj_reg = c_rarg3; // Will contain the oop
1017     const Register rklass_decode_tmp = rscratch1;
1018 
1019     const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
1020     const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
1021     const int mark_offset = lock_offset +
1022                             BasicLock::displaced_header_offset_in_bytes();
1023 
1024     // Load object pointer into obj_reg
1025     movptr(obj_reg, Address(lock_reg, obj_offset));
1026 
1027     if (DiagnoseSyncOnValueBasedClasses != 0) {
1028       load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1029       testb(Address(tmp_reg, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
1030       jcc(Assembler::notZero, slow_case);
1031     }
1032 
1033     if (LockingMode == LM_LIGHTWEIGHT) {
1034       lightweight_lock(lock_reg, obj_reg, swap_reg, tmp_reg, slow_case);
1035     } else if (LockingMode == LM_LEGACY) {
1036       // Load immediate 1 into swap_reg %rax
1037       movl(swap_reg, 1);
1038 
1039       // Load (object->mark() | 1) into swap_reg %rax
1040       orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1041 
1042       // Save (object->mark() | 1) into BasicLock's displaced header
1043       movptr(Address(lock_reg, mark_offset), swap_reg);
1044 
1045       assert(lock_offset == 0,
1046              "displaced header must be first word in BasicObjectLock");
1047 
1048       lock();
1049       cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
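           // if the CAS succeeded, the mark was unlocked and now points to our
           // BasicObjectLock (ZF set); otherwise fall through to the recursion check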
1050       jcc(Assembler::zero, count_locking);
1051 
1052       const int zero_bits = 7;
1053 
1054       // Fast check for recursive lock.
1055       //
1056       // Can apply the optimization only if this is a stack lock
1057       // allocated in this thread. For efficiency, we can focus on
1058       // recently allocated stack locks (instead of reading the stack
1059       // base and checking whether 'mark' points inside the current
1060       // thread stack):
1061       //  1) (mark & zero_bits) == 0, and
1062       //  2) rsp <= mark < rsp + os::pagesize()
1063       //
1064       // Warning: rsp + os::pagesize can overflow the stack base. We must
1065       // neither apply the optimization for an inflated lock allocated
1066       // just above the thread stack (this is why condition 1 matters)
1067       // nor apply the optimization if the stack lock is inside the stack
1068       // of another thread. The latter is avoided even in case of overflow
1069       // because we have guard pages at the end of all stacks. Hence, if
1070       // we go over the stack base and hit the stack of another thread,
1071       // this should not be in a writeable area that could contain a
1072       // stack lock allocated by that thread. As a consequence, a stack
1073       // lock less than page size away from rsp is guaranteed to be
1074       // owned by the current thread.
1075       //
1076       // These 3 tests can be done by evaluating the following
1077       // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1078       // assuming both stack pointer and pagesize have their
1079       // least significant bits clear.
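           // (Illustration, assuming a 4096-byte page: zero_bits - os::vm_page_size()
           // is 7 - 4096 == ...f007, so the and is zero exactly when mark - rsp is a
           // multiple of 8 smaller than 4096.)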
1080       // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1081       subptr(swap_reg, rsp);
1082       andptr(swap_reg, zero_bits - (int)os::vm_page_size());
1083 
1084       // Save the test result; for the recursive case the result is zero
1085       movptr(Address(lock_reg, mark_offset), swap_reg);
1086       jcc(Assembler::notZero, slow_case);
1087 
1088       bind(count_locking);
1089       inc_held_monitor_count();
1090     }
1091     jmp(done);
1092 
1093     bind(slow_case);
1094 
1095     // Call the runtime routine for slow case
1096     call_VM_preemptable(noreg,
1097             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1098             lock_reg);
1099     bind(done);
1100   }
1101 }
1102 
1103 
1104 // Unlocks an object. Used in monitorexit bytecode and
1105 // remove_activation.  Throws an IllegalMonitorStateException if the object is
1106 // not locked by the current thread.
1107 //
1108 // Args:
1109 //      rdx, c_rarg1: BasicObjectLock for lock
1110 //
1111 // Kills:
1112 //      rax
1113 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
1114 //      rscratch1 (scratch reg)
1115 // rax, rbx, rcx, rdx
1116 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
1117   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
1118 
1119   if (LockingMode == LM_MONITOR) {
1120     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1121   } else {
1122     Label count_locking, done, slow_case;
1123 
1124     const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
1125     const Register header_reg = c_rarg2;  // Will contain the old oopMark
1126     const Register obj_reg    = c_rarg3;  // Will contain the oop
1127 
1128     save_bcp(); // Save in case of exception
1129 
1130     if (LockingMode != LM_LIGHTWEIGHT) {
1131       // Convert from BasicObjectLock structure to object and BasicLock
1132       // structure. Store the BasicLock address into %rax.
1133       lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
1134     }
1135 
1136     // Load oop into obj_reg(%c_rarg3)
1137     movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
1138 
1139     // Free entry
1140     movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD);
1141 
1142     if (LockingMode == LM_LIGHTWEIGHT) {
1143       lightweight_unlock(obj_reg, swap_reg, header_reg, slow_case);
1144     } else if (LockingMode == LM_LEGACY) {
1145       // Load the old header from BasicLock structure
1146       movptr(header_reg, Address(swap_reg,
1147                                  BasicLock::displaced_header_offset_in_bytes()));
1148 
1149       // Test for recursion
1150       testptr(header_reg, header_reg);
1151 
1152       // zero for recursive case
1153       jcc(Assembler::zero, count_locking);
1154 
1155       // Atomic swap back the old header
1156       lock();
1157       cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1158 
1159       // zero for simple unlock of a stack-lock case
1160       jcc(Assembler::notZero, slow_case);
1161 
1162       bind(count_locking);
1163       dec_held_monitor_count();
1164     }
1165     jmp(done);
1166 
1167     bind(slow_case);
1168     // Call the runtime routine for slow case.
1169     movptr(Address(lock_reg, BasicObjectLock::obj_offset()), obj_reg); // restore obj
1170     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1171 
1172     bind(done);
1173 
1174     restore_bcp();
1175   }
1176 }
1177 
1178 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
1179                                                          Label& zero_continue) {
1180   assert(ProfileInterpreter, "must be profiling interpreter");
1181   movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
1182   testptr(mdp, mdp);
1183   jcc(Assembler::zero, zero_continue);
1184 }
1185 
1186 
1187 // Set the method data pointer for the current bcp.
1188 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1189   assert(ProfileInterpreter, "must be profiling interpreter");
1190   Label set_mdp;
1191   push(rax);
1192   push(rbx);
1193 
1194   get_method(rbx);
1195   // Test MDO to avoid the call if it is null.
1196   movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
1197   testptr(rax, rax);
1198   jcc(Assembler::zero, set_mdp);
1199   // rbx: method
1200   // _bcp_register: bcp
1201   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, _bcp_register);
1202   // rax: mdi
1203   // mdo is guaranteed to be non-zero here, we checked for it before the call.
1204   movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
1205   addptr(rbx, in_bytes(MethodData::data_offset()));
1206   addptr(rax, rbx);
1207   bind(set_mdp);
1208   movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), rax);
1209   pop(rbx);
1210   pop(rax);
1211 }
1212 
1213 void InterpreterMacroAssembler::verify_method_data_pointer() {
1214   assert(ProfileInterpreter, "must be profiling interpreter");
1215 #ifdef ASSERT
1216   Label verify_continue;
1217   push(rax);
1218   push(rbx);
1219   Register arg3_reg = c_rarg3;
1220   Register arg2_reg = c_rarg2;
1221   push(arg3_reg);
1222   push(arg2_reg);
1223   test_method_data_pointer(arg3_reg, verify_continue); // If mdp is zero, continue
1224   get_method(rbx);
1225 
1226   // If the mdp is valid, it will point to a DataLayout header which is
1227   // consistent with the bcp.  The converse is highly probable also.
1228   load_unsigned_short(arg2_reg,
1229                       Address(arg3_reg, in_bytes(DataLayout::bci_offset())));
1230   addptr(arg2_reg, Address(rbx, Method::const_offset()));
1231   lea(arg2_reg, Address(arg2_reg, ConstMethod::codes_offset()));
1232   cmpptr(arg2_reg, _bcp_register);
1233   jcc(Assembler::equal, verify_continue);
1234   // rbx: method
1235   // _bcp_register: bcp
1236   // c_rarg3: mdp
1237   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
1238                rbx, _bcp_register, arg3_reg);
1239   bind(verify_continue);
1240   pop(arg2_reg);
1241   pop(arg3_reg);
1242   pop(rbx);
1243   pop(rax);
1244 #endif // ASSERT
1245 }
1246 
1247 
1248 void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
1249                                                 int constant,
1250                                                 Register value) {
1251   assert(ProfileInterpreter, "must be profiling interpreter");
1252   Address data(mdp_in, constant);
1253   movptr(data, value);
1254 }
1255 
1256 
1257 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1258                                                       int constant,
1259                                                       bool decrement) {
1260   // Counter address
1261   Address data(mdp_in, constant);
1262 
1263   increment_mdp_data_at(data, decrement);
1264 }
1265 
1266 void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
1267                                                       bool decrement) {
1268   assert(ProfileInterpreter, "must be profiling interpreter");
1269   // %%% this does 64-bit counters; at best it is wasting space,
1270   // at worst it is a rare bug when counters overflow
1271 
1272   if (decrement) {
1273     // Decrement the counter.  Set condition codes.
1274     addptr(data, -DataLayout::counter_increment);
1275     // If the decrement causes the counter to overflow, stay negative
1276     Label L;
1277     jcc(Assembler::negative, L);
1278     addptr(data, DataLayout::counter_increment);
1279     bind(L);
1280   } else {
1281     assert(DataLayout::counter_increment == 1,
1282            "flow-free idiom only works with 1");
1283     // Increment the counter.  Set carry flag.
1284     addptr(data, DataLayout::counter_increment);
1285     // If the increment causes the counter to overflow, pull back by 1.
1286     sbbptr(data, 0);
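         // (e.g. an all-ones counter wraps to 0 and sets CF; the sbb then subtracts
         // the borrow and restores the saturated all-ones value)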
1287   }
1288 }
1289 
1290 
1291 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1292                                                       Register reg,
1293                                                       int constant,
1294                                                       bool decrement) {
1295   Address data(mdp_in, reg, Address::times_1, constant);
1296 
1297   increment_mdp_data_at(data, decrement);
1298 }
1299 
1300 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
1301                                                 int flag_byte_constant) {
1302   assert(ProfileInterpreter, "must be profiling interpreter");
1303   int header_offset = in_bytes(DataLayout::flags_offset());
1304   int header_bits = flag_byte_constant;
1305   // Set the flag
1306   orb(Address(mdp_in, header_offset), header_bits);
1307 }
1308 
1309 
1310 
1311 void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
1312                                                  int offset,
1313                                                  Register value,
1314                                                  Register test_value_out,
1315                                                  Label& not_equal_continue) {
1316   assert(ProfileInterpreter, "must be profiling interpreter");
1317   if (test_value_out == noreg) {
1318     cmpptr(value, Address(mdp_in, offset));
1319   } else {
1320     // Put the test value into a register, so caller can use it:
1321     movptr(test_value_out, Address(mdp_in, offset));
1322     cmpptr(test_value_out, value);
1323   }
1324   jcc(Assembler::notEqual, not_equal_continue);
1325 }
1326 
1327 
1328 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
1329                                                      int offset_of_disp) {
1330   assert(ProfileInterpreter, "must be profiling interpreter");
1331   Address disp_address(mdp_in, offset_of_disp);
1332   addptr(mdp_in, disp_address);
1333   movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
1334 }
1335 
1336 
1337 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
1338                                                      Register reg,
1339                                                      int offset_of_disp) {
1340   assert(ProfileInterpreter, "must be profiling interpreter");
1341   Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
1342   addptr(mdp_in, disp_address);
1343   movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
1344 }
1345 
1346 
1347 void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
1348                                                        int constant) {
1349   assert(ProfileInterpreter, "must be profiling interpreter");
1350   addptr(mdp_in, constant);
1351   movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
1352 }
1353 
1354 
1355 void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
1356   assert(ProfileInterpreter, "must be profiling interpreter");
1357   push(return_bci); // save/restore across call_VM
1358   call_VM(noreg,
1359           CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
1360           return_bci);
1361   pop(return_bci);
1362 }
1363 
1364 
1365 void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
1366                                                      Register bumped_count) {
1367   if (ProfileInterpreter) {
1368     Label profile_continue;
1369 
1370     // If no method data exists, go to profile_continue.
1371     // Otherwise, assign to mdp
1372     test_method_data_pointer(mdp, profile_continue);
1373 
1374     // We are taking a branch.  Increment the taken count.
    // We inline increment_mdp_data_at so that bumped_count is returned in a register:
    // increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1377     Address data(mdp, in_bytes(JumpData::taken_offset()));
1378     movptr(bumped_count, data);
1379     assert(DataLayout::counter_increment == 1,
1380             "flow-free idiom only works with 1");
1381     addptr(bumped_count, DataLayout::counter_increment);
1382     sbbptr(bumped_count, 0);
1383     movptr(data, bumped_count); // Store back out
1384 
1385     // The method data pointer needs to be updated to reflect the new target.
1386     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1387     bind(profile_continue);
1388   }
1389 }
1390 
1391 
1392 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1393   if (ProfileInterpreter) {
1394     Label profile_continue;
1395 
1396     // If no method data exists, go to profile_continue.
1397     test_method_data_pointer(mdp, profile_continue);
1398 
    // We are not taking the branch.  Increment the not taken count.
1400     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1401 
1402     // The method data pointer needs to be updated to correspond to
1403     // the next bytecode
1404     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1405     bind(profile_continue);
1406   }
1407 }
1408 
1409 void InterpreterMacroAssembler::profile_call(Register mdp) {
1410   if (ProfileInterpreter) {
1411     Label profile_continue;
1412 
1413     // If no method data exists, go to profile_continue.
1414     test_method_data_pointer(mdp, profile_continue);
1415 
1416     // We are making a call.  Increment the count.
1417     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1418 
1419     // The method data pointer needs to be updated to reflect the new target.
1420     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1421     bind(profile_continue);
1422   }
1423 }
1424 
1425 
1426 void InterpreterMacroAssembler::profile_final_call(Register mdp) {
1427   if (ProfileInterpreter) {
1428     Label profile_continue;
1429 
1430     // If no method data exists, go to profile_continue.
1431     test_method_data_pointer(mdp, profile_continue);
1432 
1433     // We are making a call.  Increment the count.
1434     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1435 
1436     // The method data pointer needs to be updated to reflect the new target.
1437     update_mdp_by_constant(mdp,
1438                            in_bytes(VirtualCallData::
1439                                     virtual_call_data_size()));
1440     bind(profile_continue);
1441   }
1442 }
1443 
1444 
1445 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1446                                                      Register mdp,
1447                                                      Register reg2,
1448                                                      bool receiver_can_be_null) {
1449   if (ProfileInterpreter) {
1450     Label profile_continue;
1451 
1452     // If no method data exists, go to profile_continue.
1453     test_method_data_pointer(mdp, profile_continue);
1454 
1455     Label skip_receiver_profile;
1456     if (receiver_can_be_null) {
1457       Label not_null;
1458       testptr(receiver, receiver);
1459       jccb(Assembler::notZero, not_null);
1460       // We are making a call.  Increment the count for null receiver.
1461       increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1462       jmp(skip_receiver_profile);
1463       bind(not_null);
1464     }
1465 
1466     // Record the receiver type.
1467     record_klass_in_profile(receiver, mdp, reg2, true);
1468     bind(skip_receiver_profile);
1469 
1470     // The method data pointer needs to be updated to reflect the new target.
1471     update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1472     bind(profile_continue);
1473   }
1474 }
1475 
1476 // This routine creates a state machine for updating the multi-row
1477 // type profile at a virtual call site (or other type-sensitive bytecode).
1478 // The machine visits each row (of receiver/count) until the receiver type
1479 // is found, or until it runs out of rows.  At the same time, it remembers
1480 // the location of the first empty row.  (An empty row records null for its
1481 // receiver, and can be allocated for a newly-observed receiver type.)
1482 // Because there are two degrees of freedom in the state, a simple linear
1483 // search will not work; it must be a decision tree.  Hence this helper
1484 // function is recursive, to generate the required tree structured code.
1485 // It's the interpreter, so we are trading off code space for speed.
1486 // See below for example code.
1487 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1488                                         Register receiver, Register mdp,
1489                                         Register reg2, int start_row,
1490                                         Label& done, bool is_virtual_call) {
1491   if (TypeProfileWidth == 0) {
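    // No per-receiver rows are kept when TypeProfileWidth is 0; just bump the
    // shared call counter.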
1492     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1493   } else {
1494     record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
1495                                   &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
1496   }
1497 }
1498 
1499 void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp, Register reg2, int start_row,
1500                                                               Label& done, int total_rows,
1501                                                               OffsetFunction item_offset_fn,
1502                                                               OffsetFunction item_count_offset_fn) {
1503   int last_row = total_rows - 1;
1504   assert(start_row <= last_row, "must be work left to do");
1505   // Test this row for both the item and for null.
1506   // Take any of three different outcomes:
1507   //   1. found item => increment count and goto done
1508   //   2. found null => keep looking for case 1, maybe allocate this cell
1509   //   3. found something else => keep looking for cases 1 and 2
1510   // Case 3 is handled by a recursive call.
1511   for (int row = start_row; row <= last_row; row++) {
1512     Label next_test;
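    // Only the first row visited at this depth needs the null test: once
    // item[start_row] has been seen to be null, that slot is remembered as the
    // empty one and the remaining rows only need the equality check.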
1513     bool test_for_null_also = (row == start_row);
1514 
1515     // See if the item is item[n].
1516     int item_offset = in_bytes(item_offset_fn(row));
1517     test_mdp_data_at(mdp, item_offset, item,
1518                      (test_for_null_also ? reg2 : noreg),
1519                      next_test);
    // (When the null test is also needed, reg2 now contains the item from the CallData.)
1521 
1522     // The item is item[n].  Increment count[n].
1523     int count_offset = in_bytes(item_count_offset_fn(row));
1524     increment_mdp_data_at(mdp, count_offset);
1525     jmp(done);
1526     bind(next_test);
1527 
1528     if (test_for_null_also) {
1529       // Failed the equality check on item[n]...  Test for null.
1530       testptr(reg2, reg2);
1531       if (start_row == last_row) {
1532         // The only thing left to do is handle the null case.
1533         Label found_null;
1534         jccb(Assembler::zero, found_null);
1535         // Item did not match any saved item and there is no empty row for it.
1536         // Increment total counter to indicate polymorphic case.
1537         increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1538         jmp(done);
1539         bind(found_null);
1540         break;
1541       }
1542       Label found_null;
1543       // Since null is rare, make it be the branch-taken case.
1544       jcc(Assembler::zero, found_null);
1545 
1546       // Put all the "Case 3" tests here.
1547       record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
1548                                     item_offset_fn, item_count_offset_fn);
1549 
1550       // Found a null.  Keep searching for a matching item,
1551       // but remember that this is an empty (unused) slot.
1552       bind(found_null);
1553     }
1554   }
1555 
1556   // In the fall-through case, we found no matching item, but we
  // observed that item[start_row] is null.
1558 
1559   // Fill in the item field and increment the count.
1560   int item_offset = in_bytes(item_offset_fn(start_row));
1561   set_mdp_data_at(mdp, item_offset, item);
1562   int count_offset = in_bytes(item_count_offset_fn(start_row));
1563   movl(reg2, DataLayout::counter_increment);
1564   set_mdp_data_at(mdp, count_offset, reg2);
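  // The outermost copy of the tree (start_row == 0) falls straight through to
  // the caller's done label; recursive copies are emitted in the middle of the
  // outer copy and must jump to done explicitly.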
1565   if (start_row > 0) {
1566     jmp(done);
1567   }
1568 }
1569 
1570 // Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
1572 //   if (row[0].rec == rec) { row[0].incr(); goto done; }
1573 //   if (row[0].rec != nullptr) {
1574 //     // inner copy of decision tree, rooted at row[1]
1575 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1576 //     if (row[1].rec != nullptr) {
1577 //       // degenerate decision tree, rooted at row[2]
1578 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1579 //       if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
1580 //       row[2].init(rec); goto done;
1581 //     } else {
1582 //       // remember row[1] is empty
1583 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1584 //       row[1].init(rec); goto done;
1585 //     }
1586 //   } else {
1587 //     // remember row[0] is empty
1588 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1589 //     if (row[2].rec == rec) { row[2].incr(); goto done; }
1590 //     row[0].init(rec); goto done;
1591 //   }
1592 //   done:
1593 
1594 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1595                                                         Register mdp, Register reg2,
1596                                                         bool is_virtual_call) {
1597   assert(ProfileInterpreter, "must be profiling");
1598   Label done;
1599 
1600   record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
1601 
  bind(done);
1603 }
1604 
1605 void InterpreterMacroAssembler::profile_ret(Register return_bci,
1606                                             Register mdp) {
1607   if (ProfileInterpreter) {
1608     Label profile_continue;
1610 
1611     // If no method data exists, go to profile_continue.
1612     test_method_data_pointer(mdp, profile_continue);
1613 
1614     // Update the total ret count.
1615     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1616 
    for (uint row = 0; row < RetData::row_limit(); row++) {
1618       Label next_test;
1619 
1620       // See if return_bci is equal to bci[n]:
1621       test_mdp_data_at(mdp,
1622                        in_bytes(RetData::bci_offset(row)),
1623                        return_bci, noreg,
1624                        next_test);
1625 
1626       // return_bci is equal to bci[n].  Increment the count.
1627       increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));
1628 
1629       // The method data pointer needs to be updated to reflect the new target.
1630       update_mdp_by_offset(mdp,
1631                            in_bytes(RetData::bci_displacement_offset(row)));
1632       jmp(profile_continue);
1633       bind(next_test);
1634     }
1635 
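    // return_bci did not match any of the cached rows, so let the runtime
    // update the mdp out of line.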
1636     update_mdp_for_ret(return_bci);
1637 
1638     bind(profile_continue);
1639   }
1640 }
1641 
1642 
1643 void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
1644   if (ProfileInterpreter) {
1645     Label profile_continue;
1646 
1647     // If no method data exists, go to profile_continue.
1648     test_method_data_pointer(mdp, profile_continue);
1649 
1650     set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
1651 
1652     // The method data pointer needs to be updated.
1653     int mdp_delta = in_bytes(BitData::bit_data_size());
1654     if (TypeProfileCasts) {
1655       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1656     }
1657     update_mdp_by_constant(mdp, mdp_delta);
1658 
1659     bind(profile_continue);
1660   }
1661 }
1662 
1663 
1664 void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
1665   if (ProfileInterpreter) {
1666     Label profile_continue;
1667 
1668     // If no method data exists, go to profile_continue.
1669     test_method_data_pointer(mdp, profile_continue);
1670 
1671     // The method data pointer needs to be updated.
1672     int mdp_delta = in_bytes(BitData::bit_data_size());
1673     if (TypeProfileCasts) {
1674       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1675 
1676       // Record the object type.
1677       record_klass_in_profile(klass, mdp, reg2, false);
1678     }
1679     update_mdp_by_constant(mdp, mdp_delta);
1680 
1681     bind(profile_continue);
1682   }
1683 }
1684 
1685 
1686 void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
1687   if (ProfileInterpreter) {
1688     Label profile_continue;
1689 
1690     // If no method data exists, go to profile_continue.
1691     test_method_data_pointer(mdp, profile_continue);
1692 
1693     // Update the default case count
1694     increment_mdp_data_at(mdp,
1695                           in_bytes(MultiBranchData::default_count_offset()));
1696 
1697     // The method data pointer needs to be updated.
1698     update_mdp_by_offset(mdp,
1699                          in_bytes(MultiBranchData::
1700                                   default_displacement_offset()));
1701 
1702     bind(profile_continue);
1703   }
1704 }
1705 
1706 
1707 void InterpreterMacroAssembler::profile_switch_case(Register index,
1708                                                     Register mdp,
1709                                                     Register reg2) {
1710   if (ProfileInterpreter) {
1711     Label profile_continue;
1712 
1713     // If no method data exists, go to profile_continue.
1714     test_method_data_pointer(mdp, profile_continue);
1715 
    // Build the byte offset into the case array:
    //   index * per_case_size() + case_array_offset()
1718     movl(reg2, in_bytes(MultiBranchData::per_case_size()));
    imulptr(index, reg2); // XXX: index is a 32-bit value, so imull may suffice
    addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX: likewise addl
1721 
1722     // Update the case count
1723     increment_mdp_data_at(mdp,
1724                           index,
1725                           in_bytes(MultiBranchData::relative_count_offset()));
1726 
1727     // The method data pointer needs to be updated.
1728     update_mdp_by_offset(mdp,
1729                          index,
1730                          in_bytes(MultiBranchData::
1731                                   relative_displacement_offset()));
1732 
1733     bind(profile_continue);
1734   }
1735 }
1736 
1737 
1739 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1740   if (state == atos) {
1741     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1742   }
1743 }
1744 
1745 
1746 // Jump if ((*counter_addr += increment) & mask) == 0
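// In pseudocode the emitted sequence computes (non-atomically; see the comment
// in the body):
//   int c = *counter_addr + InvocationCounter::count_increment;
//   *counter_addr = c;
//   if ((c & *mask) == 0) goto *where;   // branch emitted only when where != nullptr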
1747 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
1748                                                         Register scratch, Label* where) {
1749   // This update is actually not atomic and can lose a number of updates
1750   // under heavy contention, but the alternative of using the (contended)
1751   // atomic update here penalizes profiling paths too much.
1752   movl(scratch, counter_addr);
1753   incrementl(scratch, InvocationCounter::count_increment);
1754   movl(counter_addr, scratch);
1755   andl(scratch, mask);
1756   if (where != nullptr) {
1757     jcc(Assembler::zero, *where);
1758   }
1759 }
1760 
void InterpreterMacroAssembler::generate_runtime_upcalls_on_method_entry() {
1763   address upcall = RuntimeUpcalls::on_method_entry_upcall_address();
1764   if (RuntimeUpcalls::does_upcall_need_method_parameter(upcall)) {
1765     get_method(c_rarg1);
    call_VM(noreg, upcall, c_rarg1);
1767   } else {
    call_VM(noreg, upcall);
1769   }
1770 }
1771 
1772 void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever the thread is in JVMTI interp_only_mode, method entry/exit events
  // are sent so that stack depth can be tracked.  If it is possible to enter
  // interp_only_mode, we add code that checks whether the event should be sent.
1776   Register rthread = r15_thread;
1777   Register rarg = c_rarg1;
1778   if (JvmtiExport::can_post_interpreter_events()) {
1779     Label L;
1780     movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
1781     testl(rdx, rdx);
1782     jcc(Assembler::zero, L);
1783     call_VM(noreg, CAST_FROM_FN_PTR(address,
1784                                     InterpreterRuntime::post_method_entry));
1785     bind(L);
1786   }
1787 
1788   if (DTraceMethodProbes) {
1789     get_method(rarg);
1790     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1791                  rthread, rarg);
1792   }
1793 
1794   // RedefineClasses() tracing support for obsolete method entry
1795   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1796     get_method(rarg);
1797     call_VM_leaf(
1798       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1799       rthread, rarg);
1800   }
1801 }
1802 
1803 
1804 void InterpreterMacroAssembler::notify_method_exit(
1805     TosState state, NotifyMethodExitMode mode) {
  // Whenever the thread is in JVMTI interp_only_mode, method entry/exit events
  // are sent so that stack depth can be tracked.  If it is possible to enter
  // interp_only_mode, we add code that checks whether the event should be sent.
1809   Register rthread = r15_thread;
1810   Register rarg = c_rarg1;
1811   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
1812     Label L;
1813     // Note: frame::interpreter_frame_result has a dependency on how the
1814     // method result is saved across the call to post_method_exit. If this
1815     // is changed then the interpreter_frame_result implementation will
1816     // need to be updated too.
1817 
    // The template interpreter will leave the result on the top of the stack.
1819     push(state);
1820     movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
1821     testl(rdx, rdx);
1822     jcc(Assembler::zero, L);
1823     call_VM(noreg,
1824             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
1825     bind(L);
1826     pop(state);
1827   }
1828 
1829   if (DTraceMethodProbes) {
1830     push(state);
1831     get_method(rarg);
1832     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1833                  rthread, rarg);
1834     pop(state);
1835   }
1836 }
1837 
1838 void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
1839   // Get index out of bytecode pointer
1840   get_cache_index_at_bcp(index, 1, sizeof(u4));
1841   // Get address of invokedynamic array
1842   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
1843   movptr(cache, Address(cache, in_bytes(ConstantPoolCache::invokedynamic_entries_offset())));
1844   if (is_power_of_2(sizeof(ResolvedIndyEntry))) {
1845     shll(index, log2i_exact(sizeof(ResolvedIndyEntry))); // Scale index by power of 2
1846   } else {
1847     imull(index, index, sizeof(ResolvedIndyEntry)); // Scale the index to be the entry index * sizeof(ResolvedIndyEntry)
1848   }
1849   lea(cache, Address(cache, index, Address::times_1, Array<ResolvedIndyEntry>::base_offset_in_bytes()));
1850 }
1851 
1852 void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
1853   // Get index out of bytecode pointer
1854   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
1855   get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
1856 
1857   movptr(cache, Address(cache, ConstantPoolCache::field_entries_offset()));
1858   // Take shortcut if the size is a power of 2
1859   if (is_power_of_2(sizeof(ResolvedFieldEntry))) {
1860     shll(index, log2i_exact(sizeof(ResolvedFieldEntry))); // Scale index by power of 2
1861   } else {
1862     imull(index, index, sizeof(ResolvedFieldEntry)); // Scale the index to be the entry index * sizeof(ResolvedFieldEntry)
1863   }
1864   lea(cache, Address(cache, index, Address::times_1, Array<ResolvedFieldEntry>::base_offset_in_bytes()));
1865 }
1866 
1867 void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
1868   // Get index out of bytecode pointer
1869   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
1870   get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
1871 
1872   movptr(cache, Address(cache, ConstantPoolCache::method_entries_offset()));
1873   imull(index, index, sizeof(ResolvedMethodEntry)); // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
1874   lea(cache, Address(cache, index, Address::times_1, Array<ResolvedMethodEntry>::base_offset_in_bytes()));
1875 }