/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
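// The "__" shorthand routes every emitted instruction through a
// Disassembler hook that records the generating file and line, so that
// disassembly output (e.g. via -XX:+PrintInterpreter) can be annotated
// with the place in this file that produced each instruction.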

// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
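// Both are callee-saved in the C calling conventions used here (r13/r14
// in the 64-bit ABIs, rsi/rdi in 32-bit cdecl), so the bcp and locals
// pointers survive VM runtime calls without explicit spilling.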

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // The expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Setup parameters.
  // ??? convention: expect aberrant index in register ebx/rbx.
  // Pass array to create more detailed exceptions.
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

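  // The low bits of the cpCache flags word encode the callee's parameter
  // size in stack slots, so the lea below pops exactly the arguments of
  // the call we are returning from.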
  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (EnableJVMCI && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jcc(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2 ) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
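  // Spill the TOS value to the expression stack, call the runtime (which
  // may block at a safepoint), then re-dispatch the current bytecode
  // through its vtos entry, since its operand now lives in memory.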
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
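  // Both code paths below rely on increment_mask_and_jump, which in
  // effect emits:
  //   counter += count_increment;
  //   if ((counter & mask) == 0) goto *overflow;
  // where mask is the invoke_mask loaded below, derived from the
  // profiling/compilation thresholds.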
  if (ProfileInterpreter) {
    // Are we profiling?
    __ movptr(rax, Address(rbx, Method::method_data_offset()));
    __ testptr(rax, rax);
    __ jccb(Assembler::zero, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
        in_bytes(InvocationCounter::counter_offset()));
    const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
    __ jmp(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(rax,
      MethodCounters::invocation_counter_offset() +
      InvocationCounter::counter_offset());
  __ get_method_counters(rbx, rax, done);
  const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
      false, Assembler::zero, overflow);
  __ bind(done);
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx - method
  // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this wasn't obvious
// in generate_fixed_frame), so the guard should work for them too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size); // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);  // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
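//
// The pushes below lay out the fixed frame as follows, from higher to
// lower addresses (see the *_offset constants in frame_x86.hpp):
//      return address
//      saved rbp                   <- rbp
//      sender sp
//      last_sp (NULL_WORD)
//      Method*
//      mirror (kept in the frame as a GC root)
//      mdp, or 0 if there is no MethodData
//      ConstantPoolCache*
//      locals pointer
//      bcp, or 0 for native frames
//      expression stack bottom     <- rsp; the slot points at itself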
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);        // save return address
  __ enter();          // save old & set new rbp
  __ push(rbcp);        // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));      // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);        // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);      // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx); // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0); // no bcp
  } else {
    __ push(rbcp); // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF
  //   load. Thus we can use the regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  Label slow_path;
  // rbx: method

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ movptr(rax, Address(rsp, wordSize));

  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // rax: local 0
  // rbx: method (but can be used as scratch now)
  // rdx: scratch
  // rdi: scratch

  // Preserve the sender sp in case the load barrier
  // calls the runtime
  NOT_LP64(__ push(rsi));

  // Load the value of the referent field.
  const Address field_address(rax, referent_offset);
  __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF);

  // _areturn
  const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
  NOT_LP64(__ pop(rsi));      // get sender sp
  __ pop(rdi);                // get return address
  __ mov(rsp, sender_sp);     // set sp to sender sp
  __ jmp(rdi);
  __ ret(0);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  const int page_size = os::vm_page_size();
  const int n_shadow_pages = ((int)StackOverflow::stack_shadow_zone_size()) / page_size;
  const int start_page = native_call ? n_shadow_pages : 1;
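  // For native frames start_page == n_shadow_pages, so the loop collapses
  // to a single bang at the full shadow-zone distance; interpreted frames
  // touch every page because a large locals area could otherwise step
  // right over an untouched guard page.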
  for (int pages = start_page; pages <= n_shadow_pages; pages++) {
    __ bang_stack_with_offset(pages*page_size);
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax);                                       // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
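  // rlocals now points at local 0, the first parameter, which sits at the
  // highest address (rsp + rcx * wordSize - wordSize); natives declare no
  // additional locals, so the parameter area is the entire locals area.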

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method);        // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method, rax);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
            t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  // Force this write out before the read below
  __ membar(Assembler::Membar_mask_bits(
              Assembler::LoadLoad | Assembler::LoadStore |
              Assembler::StoreLoad | Assembler::StoreStore));
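  // On x86 only StoreLoad reordering is observable, so this membar
  // typically materializes as a single locked instruction (or mfence)
  // rather than four separate fences.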

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    //  Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    Label slow_path;

    __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);
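    // safepoint_poll tests the thread-local polling word and branches to
    // slow_path when a safepoint or handshake has been requested.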

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below.  Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
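  // Zeroing the block's top offset in effect frees every JNI handle the
  // native method allocated through this thread's active handle block.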

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            StackOverflow::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));    // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  //  pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals
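  // e.g. for 'static int f(int a, long b)' with five total locals:
  // rcx == 3 parameter slots (the long occupies two), leaving rdx == 2
  // extra locals to allocate and zero-initialize below.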
1300 
1301   // YYY
1302 //   __ incrementl(rdx);
1303 //   __ andl(rdx, -2);
1304 
1305   // see if we've got enough room on the stack for locals plus overhead.
1306   generate_stack_overflow_check();
1307 
1308   // get return address
1309   __ pop(rax);
1310 
1311   // compute beginning of parameters
1312   __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
1313 
1314   // rdx - # of additional locals
1315   // allocate space for locals
1316   // explicitly initialize locals
1317   {
1318     Label exit, loop;
1319     __ testl(rdx, rdx);
1320     __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
1321     __ bind(loop);
1322     __ push((int) NULL_WORD); // initialize local variables
1323     __ decrementl(rdx); // until everything initialized
1324     __ jcc(Assembler::greater, loop);
1325     __ bind(exit);
1326   }
1327 
1328   // initialize fixed part of activation frame
1329   generate_fixed_frame(false);
1330 
1331   // make sure method is not native & not abstract
1332 #ifdef ASSERT
1333   __ movl(rax, access_flags);
1334   {
1335     Label L;
1336     __ testl(rax, JVM_ACC_NATIVE);
1337     __ jcc(Assembler::zero, L);
1338     __ stop("tried to execute native method as non-native");
1339     __ bind(L);
1340   }
1341   {
1342     Label L;
1343     __ testl(rax, JVM_ACC_ABSTRACT);
1344     __ jcc(Assembler::zero, L);
1345     __ stop("tried to execute abstract method in interpreter");
1346     __ bind(L);
1347   }
1348 #endif
1349 
1350   // Since at this point in the method invocation the exception
1351   // handler would try to exit the monitor of synchronized methods
1352   // which hasn't been entered yet, we set the thread local variable
1353   // _do_not_unlock_if_synchronized to true. The remove_activation
1354   // will check this flag.
1355 
1356   const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1357   NOT_LP64(__ get_thread(thread));
1358   const Address do_not_unlock_if_synchronized(thread,
1359         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1360   __ movbool(do_not_unlock_if_synchronized, true);
1361 
1362   __ profile_parameters_type(rax, rcx, rdx);
1363   // increment invocation count & check for overflow
1364   Label invocation_counter_overflow;
1365   if (inc_counter) {
1366     generate_counter_incr(&invocation_counter_overflow);
1367   }
1368 
1369   Label continue_after_compile;
1370   __ bind(continue_after_compile);
1371 
1372   // check for synchronized interpreted methods
1373   bang_stack_shadow_pages(false);
1374 
1375   // reset the _do_not_unlock_if_synchronized flag
1376   NOT_LP64(__ get_thread(thread));
1377   __ movbool(do_not_unlock_if_synchronized, false);
1378 
1379   // check for synchronized methods
1380   // Must happen AFTER invocation_counter check and stack overflow check,
1381   // so method is not locked if overflows.
1382   if (synchronized) {
1383     // Allocate monitor and lock method
1384     lock_method();
1385   } else {
1386     // no synchronization necessary
1387 #ifdef ASSERT
1388     {
1389       Label L;
1390       __ movl(rax, access_flags);
1391       __ testl(rax, JVM_ACC_SYNCHRONIZED);
1392       __ jcc(Assembler::zero, L);
1393       __ stop("method needs synchronization");
1394       __ bind(L);
1395     }
1396 #endif
1397   }
1398 
1399   // start execution
1400 #ifdef ASSERT
1401   {
1402     Label L;
1403      const Address monitor_block_top (rbp,
1404                  frame::interpreter_frame_monitor_block_top_offset * wordSize);
1405     __ movptr(rax, monitor_block_top);
1406     __ cmpptr(rax, rsp);
1407     __ jcc(Assembler::equal, L);
1408     __ stop("broken stack frame setup in interpreter");
1409     __ bind(L);
1410   }
1411 #endif
1412 
1413   // jvmti support
1414   __ notify_method_entry();
1415 
1416   __ dispatch_next(vtos);
1417 
1418   // invocation counter overflow
1419   if (inc_counter) {
1420     // Handle overflow of counter and compile method
1421     __ bind(invocation_counter_overflow);
1422     generate_counter_overflow(continue_after_compile);
1423   }
1424 
1425   return entry_point;
1426 }
1427 
1428 //-----------------------------------------------------------------------------
1429 // Exceptions
1430 
1431 void TemplateInterpreterGenerator::generate_throw_exception() {
1432   // Entry point in previous activation (i.e., if the caller was
1433   // interpreted)
1434   Interpreter::_rethrow_exception_entry = __ pc();
1435   // Restore sp to interpreter_frame_last_sp even though we are going
1436   // to empty the expression stack for the exception processing.
1437   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
1438   // rax: exception
1439   // rdx: return address/pc that threw exception
1440   __ restore_bcp();    // r13/rsi points to the invocation (call/send) bytecode
1441   __ restore_locals();
1442   LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
1443   // Entry point for exceptions thrown within interpreter code
1444   Interpreter::_throw_exception_entry = __ pc();
1445   // expression stack is undefined here
1446   // rax: exception
1447   // r13/rsi: exception bcp
1448   __ verify_oop(rax);
1449   Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
1450   LP64_ONLY(__ mov(c_rarg1, rax));
1451 
1452   // expression stack must be empty before entering the VM in case of
1453   // an exception
1454   __ empty_expression_stack();
1455   // find exception handler address and preserve exception oop
1456   __ call_VM(rdx,
1457              CAST_FROM_FN_PTR(address,
1458                           InterpreterRuntime::exception_handler_for_exception),
1459              rarg);
1460   // rax: exception handler entry point
1461   // rdx: preserved exception oop
1462   // r13/rsi: bcp for exception handler
1463   __ push_ptr(rdx); // push exception which is now the only value on the stack
1464   __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
1465 
1466   // If the exception is not handled in the current frame the frame is
1467   // removed and the exception is rethrown (i.e. exception
1468   // continuation is _rethrow_exception).
1469   //
1470   // Note: At this point the bci is still the bci for the instruction
1471   // which caused the exception and the expression stack is
1472   // empty. Thus, for any VM calls at this point, GC will find a legal
1473   // oop map (with empty expression stack).
1474 
1475   // In current activation
1476   // tos: exception
1477   // esi: exception bcp
1478 
1479   //
1480   // JVMTI PopFrame support
1481   //
1482 
1483   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1484   __ empty_expression_stack();
1485   // Set the popframe_processing bit in pending_popframe_condition
1486   // indicating that we are currently handling popframe, so that
1487   // call_VMs that may happen later do not trigger new popframe
1488   // handling cycles.
1489   const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
1490   NOT_LP64(__ get_thread(thread));
1491   __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
1492   __ orl(rdx, JavaThread::popframe_processing_bit);
1493   __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
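  // For reference: pending_popframe_condition is a small per-thread bit set
  // (JavaThread's PopCondition values); the ones used in this file are
  // popframe_inactive, popframe_pending_bit, popframe_processing_bit and
  // popframe_force_deopt_reexecution_bit.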
1494 
1495   {
1496     // Check to see whether we are returning to a deoptimized frame.
1497     // (The PopFrame call ensures that the caller of the popped frame is
1498     // either interpreted or compiled and deoptimizes it if compiled.)
1499     // In this case, we can't call dispatch_next() after the frame is
1500     // popped, but instead must save the incoming arguments and restore
1501     // them after deoptimization has occurred.
1502     //
1503     // Note that we don't compare the return PC against the
1504     // deoptimization blob's unpack entry because of the presence of
1505     // adapter frames in C2.
1506     Label caller_not_deoptimized;
1507     Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
1508     __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
1509     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1510                                InterpreterRuntime::interpreter_contains), rarg);
1511     __ testl(rax, rax);
1512     __ jcc(Assembler::notZero, caller_not_deoptimized);
1513 
1514     // Compute size of arguments for saving when returning to
1515     // deoptimized caller
1516     __ get_method(rax);
1517     __ movptr(rax, Address(rax, Method::const_offset()));
1518     __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
1519                                                 size_of_parameters_offset())));
1520     __ shll(rax, Interpreter::logStackElementSize);
1521     __ restore_locals();
1522     __ subptr(rlocals, rax);
1523     __ addptr(rlocals, wordSize);
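    // Worked example (64-bit: wordSize == 8, logStackElementSize == 3):
    // for a method with 3 parameter slots rax becomes 3 << 3 == 24 bytes,
    // and since rlocals points at parameter slot 0 (the highest-addressed
    // slot), rlocals - 24 + 8 is the lowest-addressed parameter word, i.e.
    // the start of the region handed to popframe_preserve_args below.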
1524     // Save these arguments
1525     NOT_LP64(__ get_thread(thread));
1526     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1527                                            Deoptimization::
1528                                            popframe_preserve_args),
1529                           thread, rax, rlocals);
1530 
1531     __ remove_activation(vtos, rdx,
1532                          /* throw_monitor_exception */ false,
1533                          /* install_monitor_exception */ false,
1534                          /* notify_jvmdi */ false);
1535 
1536     // Inform deoptimization that it is responsible for restoring
1537     // these arguments
1538     NOT_LP64(__ get_thread(thread));
1539     __ movl(Address(thread, JavaThread::popframe_condition_offset()),
1540             JavaThread::popframe_force_deopt_reexecution_bit);
1541 
1542     // Continue in deoptimization handler
1543     __ jmp(rdx);
1544 
1545     __ bind(caller_not_deoptimized);
1546   }
1547 
1548   __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
1549                        /* throw_monitor_exception */ false,
1550                        /* install_monitor_exception */ false,
1551                        /* notify_jvmdi */ false);
1552 
1553   // Finish with popframe handling
1554   // A previous I2C followed by a deoptimization might have moved the
1555   // outgoing arguments further up the stack. PopFrame expects the
1556   // mutations to those outgoing arguments to be preserved and other
1557   // constraints basically require this frame to look exactly as
1558   // though it had previously invoked an interpreted activation with
1559   // no space between the top of the expression stack (current
1560   // last_sp) and the top of stack. Rather than force deopt to
1561   // maintain this kind of invariant all the time, we call a small
1562   // fixup routine to move the mutated arguments onto the top of our
1563   // expression stack if necessary.
1564 #ifndef _LP64
1565   __ mov(rax, rsp);
1566   __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1567   __ get_thread(thread);
1568   // PC must point into interpreter here
1569   __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1570   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
1571   __ get_thread(thread);
1572 #else
1573   __ mov(c_rarg1, rsp);
1574   __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1575   // PC must point into interpreter here
1576   __ set_last_Java_frame(noreg, rbp, __ pc());
1577   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
1578 #endif
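  // popframe_move_outgoing_args is a leaf call taking (thread, src, dest);
  // it recomputes the invoke's argument size from the bytecode and copies
  // that many stack-element bytes from the current rsp (src) to last_sp
  // (dest), so that once rsp is restored to last_sp below, the arguments
  // once again sit on top of the expression stack.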
1579   __ reset_last_Java_frame(thread, true);
1580 
1581   // Restore the last_sp and null it out
1582   __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1583   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
1584 
1585   __ restore_bcp();
1586   __ restore_locals();
1587   // The method data pointer was incremented already during
1588   // call profiling. We have to restore the mdp for the current bcp.
1589   if (ProfileInterpreter) {
1590     __ set_method_data_pointer_for_bcp();
1591   }
1592 
1593   // Clear the popframe condition flag
1594   NOT_LP64(__ get_thread(thread));
1595   __ movl(Address(thread, JavaThread::popframe_condition_offset()),
1596           JavaThread::popframe_inactive);
1597 
1598 #if INCLUDE_JVMTI
1599   {
1600     Label L_done;
1601     const Register local0 = rlocals;
1602 
1603     __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
1604     __ jcc(Assembler::notEqual, L_done);
1605 
1606     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1607     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
1608 
1609     __ get_method(rdx);
1610     __ movptr(rax, Address(local0, 0));
1611     __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);
1612 
1613     __ testptr(rax, rax);
1614     __ jcc(Assembler::zero, L_done);
1615 
1616     __ movptr(Address(local0, 0), rax);
1617     __ bind(L_done);
1618   }
1619 #endif // INCLUDE_JVMTI
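  // (The MemberName candidate is read from, and written back to, the first
  // local slot; member_name_arg_or_null vets it against the invokestatic at
  // rbcp and returns either the MemberName to restore or NULL.)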
1620 
1621   __ dispatch_next(vtos);
1622   // end of PopFrame support
1623 
1624   Interpreter::_remove_activation_entry = __ pc();
1625 
1626   // preserve exception over this code sequence
1627   __ pop_ptr(rax);
1628   NOT_LP64(__ get_thread(thread));
1629   __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
1630   // remove the activation (without doing throws on illegalMonitorExceptions)
1631   __ remove_activation(vtos, rdx, false, true, false);
1632   // restore exception
1633   NOT_LP64(__ get_thread(thread));
1634   __ get_vm_result(rax, thread);
1635 
1636   // In between activations - previous activation type unknown yet
1637   // compute continuation point - the continuation point expects the
1638   // following registers set up:
1639   //
1640   // rax: exception
1641   // rdx: return address/pc that threw exception
1642   // rsp: expression stack of caller
1643   // rbp: ebp of caller
1644   __ push(rax);                                  // save exception
1645   __ push(rdx);                                  // save return address
1646   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1647                           SharedRuntime::exception_handler_for_return_address),
1648                         thread, rdx);
1649   __ mov(rbx, rax);                              // save exception handler
1650   __ pop(rdx);                                   // restore return address
1651   __ pop(rax);                                   // restore exception
1652   // Note that an "issuing PC" is actually the next PC after the call
1653   __ jmp(rbx);                                   // jump to exception
1654                                                  // handler of caller
1655 }
1656 
1657 
1658 //
1659 // JVMTI ForceEarlyReturn support
1660 //
1661 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1662   address entry = __ pc();
1663 
1664   __ restore_bcp();
1665   __ restore_locals();
1666   __ empty_expression_stack();
1667   __ load_earlyret_value(state);  // on 32-bit, long results come back in rdx:rax, so don't reuse rdx
1668 
1669   const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
1670   NOT_LP64(__ get_thread(thread));
1671   __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
1672   Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
1673 
1674   // Clear the earlyret state
1675   __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
1676 
1677   __ remove_activation(state, rsi,
1678                        false, /* throw_monitor_exception */
1679                        false, /* install_monitor_exception */
1680                        true); /* notify_jvmdi */
1681   __ jmp(rsi);
1682 
1683   return entry;
1684 } // end of ForceEarlyReturn support
1685 
1686 
1687 //-----------------------------------------------------------------------------
1688 // Helper for vtos entry point generation
1689 
1690 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
1691                                                          address& bep,
1692                                                          address& cep,
1693                                                          address& sep,
1694                                                          address& aep,
1695                                                          address& iep,
1696                                                          address& lep,
1697                                                          address& fep,
1698                                                          address& dep,
1699                                                          address& vep) {
1700   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1701   Label L;
1702 #ifndef _LP64
1703   fep = __ pc();     // ftos entry point
1704       __ push(ftos);
1705       __ jmp(L);
1706   dep = __ pc();     // dtos entry point
1707       __ push(dtos);
1708       __ jmp(L);
1709 #else
1710   fep = __ pc();     // ftos entry point
1711       __ push_f(xmm0);
1712       __ jmp(L);
1713   dep = __ pc();     // dtos entry point
1714       __ push_d(xmm0);
1715       __ jmp(L);
1716 #endif // _LP64
1717   lep = __ pc();     // ltos entry point
1718       __ push_l();
1719       __ jmp(L);
1720   aep = bep = cep = sep = iep = __ pc();      // [abcsi]tos entry point
1721       __ push_i_or_ptr();
1722   vep = __ pc();    // vtos entry point
1723   __ bind(L);
1724   generate_and_dispatch(t);
1725 }
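
// The ladder generated above normalizes every TOS cache onto the expression
// stack before falling into the single vtos template; schematically, for
// 64-bit:
//
//   fep:                  push_f(xmm0); jmp L
//   dep:                  push_d(xmm0); jmp L
//   lep:                  push_l();     jmp L
//   aep/bep/cep/sep/iep:  push_i_or_ptr()
//   vep:               L: <code emitted by generate_and_dispatch(t)>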
1726 
1727 //-----------------------------------------------------------------------------
1728 
1729 // Non-product code
1730 #ifndef PRODUCT
1731 
1732 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1733   address entry = __ pc();
1734 
1735 #ifndef _LP64
1736   // prepare expression stack
1737   __ pop(rcx);          // pop return address so expression stack is 'pure'
1738   __ push(state);       // save tosca
1739 
1740   // pass tosca registers as arguments & call tracer
1741   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
1742   __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
1743   __ pop(state);        // restore tosca
1744 
1745   // return
1746   __ jmp(rcx);
1747 #else
1748   __ push(state);
1749   __ push(c_rarg0);
1750   __ push(c_rarg1);
1751   __ push(c_rarg2);
1752   __ push(c_rarg3);
1753   __ mov(c_rarg2, rax);  // Pass itos
1754 #ifdef _WIN64
1755   __ movflt(xmm3, xmm0); // Pass ftos
1756 #endif
1757   __ call_VM(noreg,
1758              CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
1759              c_rarg1, c_rarg2, c_rarg3);
1760   __ pop(c_rarg3);
1761   __ pop(c_rarg2);
1762   __ pop(c_rarg1);
1763   __ pop(c_rarg0);
1764   __ pop(state);
1765   __ ret(0);                                   // return from result handler
1766 #endif // _LP64
1767 
1768   return entry;
1769 }
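
// In debug builds this stub backs the develop flag -XX:+TraceBytecodes:
// generate_and_dispatch() plants a call to it, via trace_bytecode() below,
// in front of every bytecode template.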
1770 
1771 void TemplateInterpreterGenerator::count_bytecode() {
1772   __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
1773 }
1774 
1775 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1776   __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
1777 }
1778 
1779 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1780   __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
1781   __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
1782   __ orl(rbx,
1783          ((int) t->bytecode()) <<
1784          BytecodePairHistogram::log2_number_of_codes);
1785   __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
1786   __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
1787   __ incrementl(Address(rscratch1, rbx, Address::times_4));
1788 }
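
// Worked example, assuming log2_number_of_codes == 8: the update above turns
// _index from (previous << 8 | older) into (current << 8 | previous), so the
// counter bumped is _counters[(current << 8) | previous].  For instance, with
// previous == _iload (0x15) and current == _iadd (0x60) that is
// _counters[0x6015].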
1789 
1790 
1791 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1792   // Call a little run-time stub to avoid blow-up for each bytecode.
1793   // The run-time stub saves the right registers, depending on
1794   // the tosca in-state for the given template.
1795 
1796   assert(Interpreter::trace_code(t->tos_in()) != NULL,
1797          "entry must have been generated");
1798 #ifndef _LP64
1799   __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
1800 #else
1801   __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
1802   __ andptr(rsp, -16); // align stack as required by ABI
1803   __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
1804   __ mov(rsp, r12); // restore sp
1805   __ reinit_heapbase();
1806 #endif // _LP64
1807 }
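
// The r12/rsp dance above is needed because this path does not go through
// call_VM: andptr(rsp, -16) rounds rsp down to the 16-byte boundary the
// x86-64 ABI expects at a call (e.g. an rsp ending in 0x38 is rounded to
// 0x30), r12 keeps the original value, and reinit_heapbase() then restores
// r12's role as the compressed-oops base.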
1808 
1809 
1810 void TemplateInterpreterGenerator::stop_interpreter_at() {
1811   Label L;
1812   __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
1813            StopInterpreterAt);
1814   __ jcc(Assembler::notEqual, L);
1815   __ int3();
1816   __ bind(L);
1817 }
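
// Usage sketch: in a debug build, running with -XX:StopInterpreterAt=<n>
// makes generate_and_dispatch() emit count_bytecode() plus this check, so
// the int3 breakpoint fires for a native debugger when the n-th bytecode
// is about to execute.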
1818 #endif // !PRODUCT