1 /* 2 * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "classfile/javaClasses.hpp" 28 #include "compiler/compiler_globals.hpp" 29 #include "compiler/disassembler.hpp" 30 #include "gc/shared/barrierSetAssembler.hpp" 31 #include "interpreter/bytecodeHistogram.hpp" 32 #include "interpreter/interp_masm.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "interpreter/interpreterRuntime.hpp" 35 #include "interpreter/templateInterpreterGenerator.hpp" 36 #include "interpreter/templateTable.hpp" 37 #include "oops/arrayOop.hpp" 38 #include "oops/methodData.hpp" 39 #include "oops/method.hpp" 40 #include "oops/oop.inline.hpp" 41 #include "oops/inlineKlass.hpp" 42 #include "prims/jvmtiExport.hpp" 43 #include "prims/jvmtiThreadState.hpp" 44 #include "runtime/deoptimization.hpp" 45 #include "runtime/frame.inline.hpp" 46 #include "runtime/jniHandles.hpp" 47 #include "runtime/sharedRuntime.hpp" 48 #include "runtime/stubRoutines.hpp" 49 #include "runtime/synchronizer.hpp" 50 #include "runtime/timer.hpp" 51 #include "runtime/vframeArray.hpp" 52 #include "utilities/debug.hpp" 53 #include "utilities/macros.hpp" 54 55 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)-> 56 57 // Size of interpreter code. Increase if too small. Interpreter will 58 // fail with a guarantee ("not enough space for interpreter generation"); 59 // if too small. 60 // Run with +PrintInterpreter to get the VM to print out the size. 61 // Max size with JVMTI 62 #ifdef AMD64 63 int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(280) NOT_JVMCI(268) * 1024; 64 #else 65 int TemplateInterpreter::InterpreterCodeSize = 224 * 1024; 66 #endif // AMD64 67 68 // Global Register Names 69 static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi); 70 static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi); 71 72 const int method_offset = frame::interpreter_frame_method_offset * wordSize; 73 const int bcp_offset = frame::interpreter_frame_bcp_offset * wordSize; 74 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize; 75 76 77 //----------------------------------------------------------------------------- 78 79 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() { 80 address entry = __ pc(); 81 82 #ifdef ASSERT 83 { 84 Label L; 85 __ lea(rax, Address(rbp, 86 frame::interpreter_frame_monitor_block_top_offset * 87 wordSize)); 88 __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack 89 // grows negative) 90 __ jcc(Assembler::aboveEqual, L); // check if frame is complete 91 __ stop ("interpreter frame not set up"); 92 __ bind(L); 93 } 94 #endif // ASSERT 95 // Restore bcp under the assumption that the current frame is still 96 // interpreted 97 __ restore_bcp(); 98 99 // expression stack must be empty before entering the VM if an 100 // exception happened 101 __ empty_expression_stack(); 102 // throw exception 103 __ call_VM(noreg, 104 CAST_FROM_FN_PTR(address, 105 InterpreterRuntime::throw_StackOverflowError)); 106 return entry; 107 } 108 109 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() { 110 address entry = __ pc(); 111 // The expression stack must be empty before entering the VM if an 112 // exception happened. 113 __ empty_expression_stack(); 114 115 // Setup parameters. 116 // ??? convention: expect aberrant index in register ebx/rbx. 117 // Pass array to create more detailed exceptions. 118 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); 119 __ call_VM(noreg, 120 CAST_FROM_FN_PTR(address, 121 InterpreterRuntime:: 122 throw_ArrayIndexOutOfBoundsException), 123 rarg, rbx); 124 return entry; 125 } 126 127 address TemplateInterpreterGenerator::generate_ClassCastException_handler() { 128 address entry = __ pc(); 129 130 // object is at TOS 131 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); 132 __ pop(rarg); 133 134 // expression stack must be empty before entering the VM if an 135 // exception happened 136 __ empty_expression_stack(); 137 138 __ call_VM(noreg, 139 CAST_FROM_FN_PTR(address, 140 InterpreterRuntime:: 141 throw_ClassCastException), 142 rarg); 143 return entry; 144 } 145 146 address TemplateInterpreterGenerator::generate_exception_handler_common( 147 const char* name, const char* message, bool pass_oop) { 148 assert(!pass_oop || message == NULL, "either oop or message but not both"); 149 address entry = __ pc(); 150 151 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); 152 Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2); 153 154 if (pass_oop) { 155 // object is at TOS 156 __ pop(rarg2); 157 } 158 // expression stack must be empty before entering the VM if an 159 // exception happened 160 __ empty_expression_stack(); 161 // setup parameters 162 __ lea(rarg, ExternalAddress((address)name)); 163 if (pass_oop) { 164 __ call_VM(rax, CAST_FROM_FN_PTR(address, 165 InterpreterRuntime:: 166 create_klass_exception), 167 rarg, rarg2); 168 } else { 169 __ lea(rarg2, ExternalAddress((address)message)); 170 __ call_VM(rax, 171 CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), 172 rarg, rarg2); 173 } 174 // throw exception 175 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); 176 return entry; 177 } 178 179 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { 180 address entry = __ pc(); 181 182 #ifndef _LP64 183 #ifdef COMPILER2 184 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases 185 if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { 186 for (int i = 1; i < 8; i++) { 187 __ ffree(i); 188 } 189 } else if (UseSSE < 2) { 190 __ empty_FPU_stack(); 191 } 192 #endif // COMPILER2 193 if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { 194 __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled"); 195 } else { 196 __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled"); 197 } 198 199 if (state == ftos) { 200 __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter"); 201 } else if (state == dtos) { 202 __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter"); 203 } 204 #endif // _LP64 205 206 // Restore stack bottom in case i2c adjusted stack 207 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 208 // and NULL it as marker that esp is now tos until next java call 209 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 210 211 if (state == atos && InlineTypeReturnedAsFields) { 212 __ store_inline_type_fields_to_buf(NULL); 213 } 214 215 __ restore_bcp(); 216 __ restore_locals(); 217 218 if (state == atos) { 219 Register mdp = rbx; 220 Register tmp = rcx; 221 __ profile_return_type(mdp, rax, tmp); 222 } 223 224 const Register cache = rbx; 225 const Register index = rcx; 226 __ get_cache_and_index_at_bcp(cache, index, 1, index_size); 227 228 const Register flags = cache; 229 __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); 230 __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask); 231 __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale())); 232 233 const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread); 234 if (JvmtiExport::can_pop_frame()) { 235 NOT_LP64(__ get_thread(java_thread)); 236 __ check_and_handle_popframe(java_thread); 237 } 238 if (JvmtiExport::can_force_early_return()) { 239 NOT_LP64(__ get_thread(java_thread)); 240 __ check_and_handle_earlyret(java_thread); 241 } 242 243 __ dispatch_next(state, step); 244 245 return entry; 246 } 247 248 249 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) { 250 address entry = __ pc(); 251 252 #ifndef _LP64 253 if (state == ftos) { 254 __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter"); 255 } else if (state == dtos) { 256 __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter"); 257 } 258 #endif // _LP64 259 260 // NULL last_sp until next java call 261 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 262 __ restore_bcp(); 263 __ restore_locals(); 264 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread); 265 NOT_LP64(__ get_thread(thread)); 266 #if INCLUDE_JVMCI 267 // Check if we need to take lock at entry of synchronized method. This can 268 // only occur on method entry so emit it only for vtos with step 0. 269 if (EnableJVMCI && state == vtos && step == 0) { 270 Label L; 271 __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0); 272 __ jcc(Assembler::zero, L); 273 // Clear flag. 274 __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0); 275 // Satisfy calling convention for lock_method(). 276 __ get_method(rbx); 277 // Take lock. 278 lock_method(); 279 __ bind(L); 280 } else { 281 #ifdef ASSERT 282 if (EnableJVMCI) { 283 Label L; 284 __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0); 285 __ jcc(Assembler::zero, L); 286 __ stop("unexpected pending monitor in deopt entry"); 287 __ bind(L); 288 } 289 #endif 290 } 291 #endif 292 // handle exceptions 293 { 294 Label L; 295 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); 296 __ jcc(Assembler::zero, L); 297 __ call_VM(noreg, 298 CAST_FROM_FN_PTR(address, 299 InterpreterRuntime::throw_pending_exception)); 300 __ should_not_reach_here(); 301 __ bind(L); 302 } 303 if (continuation == NULL) { 304 __ dispatch_next(state, step); 305 } else { 306 __ jump_to_entry(continuation); 307 } 308 return entry; 309 } 310 311 address TemplateInterpreterGenerator::generate_result_handler_for( 312 BasicType type) { 313 address entry = __ pc(); 314 switch (type) { 315 case T_BOOLEAN: __ c2bool(rax); break; 316 #ifndef _LP64 317 case T_CHAR : __ andptr(rax, 0xFFFF); break; 318 #else 319 case T_CHAR : __ movzwl(rax, rax); break; 320 #endif // _LP64 321 case T_BYTE : __ sign_extend_byte(rax); break; 322 case T_SHORT : __ sign_extend_short(rax); break; 323 case T_INT : /* nothing to do */ break; 324 case T_LONG : /* nothing to do */ break; 325 case T_VOID : /* nothing to do */ break; 326 #ifndef _LP64 327 case T_DOUBLE : 328 case T_FLOAT : 329 { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); 330 __ pop(t); // remove return address first 331 // Must return a result for interpreter or compiler. In SSE 332 // mode, results are returned in xmm0 and the FPU stack must 333 // be empty. 334 if (type == T_FLOAT && UseSSE >= 1) { 335 // Load ST0 336 __ fld_d(Address(rsp, 0)); 337 // Store as float and empty fpu stack 338 __ fstp_s(Address(rsp, 0)); 339 // and reload 340 __ movflt(xmm0, Address(rsp, 0)); 341 } else if (type == T_DOUBLE && UseSSE >= 2 ) { 342 __ movdbl(xmm0, Address(rsp, 0)); 343 } else { 344 // restore ST0 345 __ fld_d(Address(rsp, 0)); 346 } 347 // and pop the temp 348 __ addptr(rsp, 2 * wordSize); 349 __ push(t); // restore return address 350 } 351 break; 352 #else 353 case T_FLOAT : /* nothing to do */ break; 354 case T_DOUBLE : /* nothing to do */ break; 355 #endif // _LP64 356 357 case T_PRIMITIVE_OBJECT: // fall through (inline types are handled with oops) 358 case T_OBJECT : 359 // retrieve result from frame 360 __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); 361 // and verify it 362 __ verify_oop(rax); 363 break; 364 default : ShouldNotReachHere(); 365 } 366 __ ret(0); // return from result handler 367 return entry; 368 } 369 370 address TemplateInterpreterGenerator::generate_safept_entry_for( 371 TosState state, 372 address runtime_entry) { 373 address entry = __ pc(); 374 __ push(state); 375 __ call_VM(noreg, runtime_entry); 376 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos)); 377 return entry; 378 } 379 380 381 382 // Helpers for commoning out cases in the various type of method entries. 383 // 384 385 386 // increment invocation count & check for overflow 387 // 388 // Note: checking for negative value instead of overflow 389 // so we have a 'sticky' overflow test 390 // 391 // rbx: method 392 // rcx: invocation counter 393 // 394 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) { 395 Label done; 396 // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. 397 Label no_mdo; 398 if (ProfileInterpreter) { 399 // Are we profiling? 400 __ movptr(rax, Address(rbx, Method::method_data_offset())); 401 __ testptr(rax, rax); 402 __ jccb(Assembler::zero, no_mdo); 403 // Increment counter in the MDO 404 const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + 405 in_bytes(InvocationCounter::counter_offset())); 406 const Address mask(rax, in_bytes(MethodData::invoke_mask_offset())); 407 __ increment_mask_and_jump(mdo_invocation_counter, mask, rcx, overflow); 408 __ jmp(done); 409 } 410 __ bind(no_mdo); 411 // Increment counter in MethodCounters 412 const Address invocation_counter(rax, 413 MethodCounters::invocation_counter_offset() + 414 InvocationCounter::counter_offset()); 415 __ get_method_counters(rbx, rax, done); 416 const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset())); 417 __ increment_mask_and_jump(invocation_counter, mask, rcx, overflow); 418 __ bind(done); 419 } 420 421 void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) { 422 423 // Asm interpreter on entry 424 // r14/rdi - locals 425 // r13/rsi - bcp 426 // rbx - method 427 // rdx - cpool --- DOES NOT APPEAR TO BE TRUE 428 // rbp - interpreter frame 429 430 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ] 431 // Everything as it was on entry 432 // rdx is not restored. Doesn't appear to really be set. 433 434 // InterpreterRuntime::frequency_counter_overflow takes two 435 // arguments, the first (thread) is passed by call_VM, the second 436 // indicates if the counter overflow occurs at a backwards branch 437 // (NULL bcp). We pass zero for it. The call returns the address 438 // of the verified entry point for the method or NULL if the 439 // compilation did not complete (either went background or bailed 440 // out). 441 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); 442 __ movl(rarg, 0); 443 __ call_VM(noreg, 444 CAST_FROM_FN_PTR(address, 445 InterpreterRuntime::frequency_counter_overflow), 446 rarg); 447 448 __ movptr(rbx, Address(rbp, method_offset)); // restore Method* 449 // Preserve invariant that r13/r14 contain bcp/locals of sender frame 450 // and jump to the interpreted entry. 451 __ jmp(do_continue, relocInfo::none); 452 } 453 454 // See if we've got enough room on the stack for locals plus overhead below 455 // JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError 456 // without going through the signal handler, i.e., reserved and yellow zones 457 // will not be made usable. The shadow zone must suffice to handle the 458 // overflow. 459 // The expression stack grows down incrementally, so the normal guard 460 // page mechanism will work for that. 461 // 462 // NOTE: Since the additional locals are also always pushed (wasn't 463 // obvious in generate_fixed_frame) so the guard should work for them 464 // too. 465 // 466 // Args: 467 // rdx: number of additional locals this frame needs (what we must check) 468 // rbx: Method* 469 // 470 // Kills: 471 // rax 472 void TemplateInterpreterGenerator::generate_stack_overflow_check(void) { 473 474 // monitor entry size: see picture of stack in frame_x86.hpp 475 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 476 477 // total overhead size: entry_size + (saved rbp through expr stack 478 // bottom). be sure to change this if you add/subtract anything 479 // to/from the overhead area 480 const int overhead_size = 481 -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size; 482 483 const int page_size = os::vm_page_size(); 484 485 Label after_frame_check; 486 487 // see if the frame is greater than one page in size. If so, 488 // then we need to verify there is enough stack space remaining 489 // for the additional locals. 490 __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize); 491 __ jcc(Assembler::belowEqual, after_frame_check); 492 493 // compute rsp as if this were going to be the last frame on 494 // the stack before the red zone 495 496 Label after_frame_check_pop; 497 const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread); 498 #ifndef _LP64 499 __ push(thread); 500 __ get_thread(thread); 501 #endif 502 503 const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset()); 504 505 // locals + overhead, in bytes 506 __ mov(rax, rdx); 507 __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes. 508 __ addptr(rax, overhead_size); 509 510 #ifdef ASSERT 511 Label limit_okay; 512 // Verify that thread stack overflow limit is non-zero. 513 __ cmpptr(stack_limit, (int32_t)NULL_WORD); 514 __ jcc(Assembler::notEqual, limit_okay); 515 __ stop("stack overflow limit is zero"); 516 __ bind(limit_okay); 517 #endif 518 519 // Add locals/frame size to stack limit. 520 __ addptr(rax, stack_limit); 521 522 // Check against the current stack bottom. 523 __ cmpptr(rsp, rax); 524 525 __ jcc(Assembler::above, after_frame_check_pop); 526 NOT_LP64(__ pop(rsi)); // get saved bcp 527 528 // Restore sender's sp as SP. This is necessary if the sender's 529 // frame is an extended compiled frame (see gen_c2i_adapter()) 530 // and safer anyway in case of JSR292 adaptations. 531 532 __ pop(rax); // return address must be moved if SP is changed 533 __ mov(rsp, rbcp); 534 __ push(rax); 535 536 // Note: the restored frame is not necessarily interpreted. 537 // Use the shared runtime version of the StackOverflowError. 538 assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); 539 __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry())); 540 // all done with frame size check 541 __ bind(after_frame_check_pop); 542 NOT_LP64(__ pop(rsi)); 543 544 // all done with frame size check 545 __ bind(after_frame_check); 546 } 547 548 // Allocate monitor and lock method (asm interpreter) 549 // 550 // Args: 551 // rbx: Method* 552 // r14/rdi: locals 553 // 554 // Kills: 555 // rax 556 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs) 557 // rscratch1, rscratch2 (scratch regs) 558 void TemplateInterpreterGenerator::lock_method() { 559 // synchronize method 560 const Address access_flags(rbx, Method::access_flags_offset()); 561 const Address monitor_block_top( 562 rbp, 563 frame::interpreter_frame_monitor_block_top_offset * wordSize); 564 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 565 566 #ifdef ASSERT 567 { 568 Label L; 569 __ movl(rax, access_flags); 570 __ testl(rax, JVM_ACC_SYNCHRONIZED); 571 __ jcc(Assembler::notZero, L); 572 __ stop("method doesn't need synchronization"); 573 __ bind(L); 574 } 575 #endif // ASSERT 576 577 // get synchronization object 578 { 579 Label done; 580 __ movl(rax, access_flags); 581 __ testl(rax, JVM_ACC_STATIC); 582 // get receiver (assume this is frequent case) 583 __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0))); 584 __ jcc(Assembler::zero, done); 585 __ load_mirror(rax, rbx); 586 587 #ifdef ASSERT 588 { 589 Label L; 590 __ testptr(rax, rax); 591 __ jcc(Assembler::notZero, L); 592 __ stop("synchronization object is NULL"); 593 __ bind(L); 594 } 595 #endif // ASSERT 596 597 __ bind(done); 598 } 599 600 // add space for monitor & lock 601 __ subptr(rsp, entry_size); // add space for a monitor entry 602 __ movptr(monitor_block_top, rsp); // set new monitor block top 603 // store object 604 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); 605 const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1); 606 __ movptr(lockreg, rsp); // object address 607 __ lock_object(lockreg); 608 } 609 610 // Generate a fixed interpreter frame. This is identical setup for 611 // interpreted methods and for native methods hence the shared code. 612 // 613 // Args: 614 // rax: return address 615 // rbx: Method* 616 // r14/rdi: pointer to locals 617 // r13/rsi: sender sp 618 // rdx: cp cache 619 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { 620 // initialize fixed part of activation frame 621 __ push(rax); // save return address 622 __ enter(); // save old & set new rbp 623 __ push(rbcp); // set sender sp 624 __ push((int)NULL_WORD); // leave last_sp as null 625 __ movptr(rbcp, Address(rbx, Method::const_offset())); // get ConstMethod* 626 __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase 627 __ push(rbx); // save Method* 628 // Get mirror and store it in the frame as GC root for this Method* 629 __ load_mirror(rdx, rbx); 630 __ push(rdx); 631 if (ProfileInterpreter) { 632 Label method_data_continue; 633 __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset()))); 634 __ testptr(rdx, rdx); 635 __ jcc(Assembler::zero, method_data_continue); 636 __ addptr(rdx, in_bytes(MethodData::data_offset())); 637 __ bind(method_data_continue); 638 __ push(rdx); // set the mdp (method data pointer) 639 } else { 640 __ push(0); 641 } 642 643 __ movptr(rdx, Address(rbx, Method::const_offset())); 644 __ movptr(rdx, Address(rdx, ConstMethod::constants_offset())); 645 __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes())); 646 __ push(rdx); // set constant pool cache 647 __ push(rlocals); // set locals pointer 648 if (native_call) { 649 __ push(0); // no bcp 650 } else { 651 __ push(rbcp); // set bcp 652 } 653 __ push(0); // reserve word for pointer to expression stack bottom 654 __ movptr(Address(rsp, 0), rsp); // set expression stack bottom 655 } 656 657 // End of helpers 658 659 // Method entry for java.lang.ref.Reference.get. 660 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) { 661 // Code: _aload_0, _getfield, _areturn 662 // parameter size = 1 663 // 664 // The code that gets generated by this routine is split into 2 parts: 665 // 1. The "intrinsified" code performing an ON_WEAK_OOP_REF load, 666 // 2. The slow path - which is an expansion of the regular method entry. 667 // 668 // Notes:- 669 // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed. 670 // * We may jump to the slow path iff the receiver is null. If the 671 // Reference object is null then we no longer perform an ON_WEAK_OOP_REF load 672 // Thus we can use the regular method entry code to generate the NPE. 673 // 674 // rbx: Method* 675 676 // r13: senderSP must preserve for slow path, set SP to it on fast path 677 678 address entry = __ pc(); 679 680 const int referent_offset = java_lang_ref_Reference::referent_offset(); 681 682 Label slow_path; 683 // rbx: method 684 685 // Check if local 0 != NULL 686 // If the receiver is null then it is OK to jump to the slow path. 687 __ movptr(rax, Address(rsp, wordSize)); 688 689 __ testptr(rax, rax); 690 __ jcc(Assembler::zero, slow_path); 691 692 // rax: local 0 693 // rbx: method (but can be used as scratch now) 694 // rdx: scratch 695 // rdi: scratch 696 697 // Preserve the sender sp in case the load barrier 698 // calls the runtime 699 NOT_LP64(__ push(rsi)); 700 701 // Load the value of the referent field. 702 const Address field_address(rax, referent_offset); 703 __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF); 704 705 // _areturn 706 const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13); 707 NOT_LP64(__ pop(rsi)); // get sender sp 708 __ pop(rdi); // get return address 709 __ mov(rsp, sender_sp); // set sp to sender sp 710 __ jmp(rdi); 711 __ ret(0); 712 713 // generate a vanilla interpreter entry as the slow path 714 __ bind(slow_path); 715 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals)); 716 return entry; 717 } 718 719 void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) { 720 // See more discussion in stackOverflow.hpp. 721 722 // Note that we do the banging after the frame is setup, since the exception 723 // handling code expects to find a valid interpreter frame on the stack. 724 // Doing the banging earlier fails if the caller frame is not an interpreter 725 // frame. 726 // (Also, the exception throwing code expects to unlock any synchronized 727 // method receiver, so do the banging after locking the receiver.) 728 729 const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size()); 730 const int page_size = os::vm_page_size(); 731 const int n_shadow_pages = shadow_zone_size / page_size; 732 733 const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread); 734 #ifndef _LP64 735 __ push(thread); 736 __ get_thread(thread); 737 #endif 738 739 #ifdef ASSERT 740 Label L_good_limit; 741 __ cmpptr(Address(thread, JavaThread::shadow_zone_safe_limit()), (int32_t)NULL_WORD); 742 __ jcc(Assembler::notEqual, L_good_limit); 743 __ stop("shadow zone safe limit is not initialized"); 744 __ bind(L_good_limit); 745 746 Label L_good_watermark; 747 __ cmpptr(Address(thread, JavaThread::shadow_zone_growth_watermark()), (int32_t)NULL_WORD); 748 __ jcc(Assembler::notEqual, L_good_watermark); 749 __ stop("shadow zone growth watermark is not initialized"); 750 __ bind(L_good_watermark); 751 #endif 752 753 Label L_done; 754 755 __ cmpptr(rsp, Address(thread, JavaThread::shadow_zone_growth_watermark())); 756 __ jcc(Assembler::above, L_done); 757 758 for (int p = 1; p <= n_shadow_pages; p++) { 759 __ bang_stack_with_offset(p*page_size); 760 } 761 762 // Record the new watermark, but only if update is above the safe limit. 763 // Otherwise, the next time around the check above would pass the safe limit. 764 __ cmpptr(rsp, Address(thread, JavaThread::shadow_zone_safe_limit())); 765 __ jccb(Assembler::belowEqual, L_done); 766 __ movptr(Address(thread, JavaThread::shadow_zone_growth_watermark()), rsp); 767 768 __ bind(L_done); 769 770 #ifndef _LP64 771 __ pop(thread); 772 #endif 773 } 774 775 // Interpreter stub for calling a native method. (asm interpreter) 776 // This sets up a somewhat different looking stack for calling the 777 // native method than the typical interpreter frame setup. 778 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { 779 // determine code generation flags 780 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; 781 782 // rbx: Method* 783 // rbcp: sender sp 784 785 address entry_point = __ pc(); 786 787 const Address constMethod (rbx, Method::const_offset()); 788 const Address access_flags (rbx, Method::access_flags_offset()); 789 const Address size_of_parameters(rcx, ConstMethod:: 790 size_of_parameters_offset()); 791 792 793 // get parameter size (always needed) 794 __ movptr(rcx, constMethod); 795 __ load_unsigned_short(rcx, size_of_parameters); 796 797 // native calls don't need the stack size check since they have no 798 // expression stack and the arguments are already on the stack and 799 // we only add a handful of words to the stack 800 801 // rbx: Method* 802 // rcx: size of parameters 803 // rbcp: sender sp 804 __ pop(rax); // get return address 805 806 // for natives the size of locals is zero 807 808 // compute beginning of parameters 809 __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); 810 811 // add 2 zero-initialized slots for native calls 812 // initialize result_handler slot 813 __ push((int) NULL_WORD); 814 // slot for oop temp 815 // (static native method holder mirror/jni oop result) 816 __ push((int) NULL_WORD); 817 818 // initialize fixed part of activation frame 819 generate_fixed_frame(true); 820 821 // make sure method is native & not abstract 822 #ifdef ASSERT 823 __ movl(rax, access_flags); 824 { 825 Label L; 826 __ testl(rax, JVM_ACC_NATIVE); 827 __ jcc(Assembler::notZero, L); 828 __ stop("tried to execute non-native method as native"); 829 __ bind(L); 830 } 831 { 832 Label L; 833 __ testl(rax, JVM_ACC_ABSTRACT); 834 __ jcc(Assembler::zero, L); 835 __ stop("tried to execute abstract method in interpreter"); 836 __ bind(L); 837 } 838 #endif 839 840 // Since at this point in the method invocation the exception handler 841 // would try to exit the monitor of synchronized methods which hasn't 842 // been entered yet, we set the thread local variable 843 // _do_not_unlock_if_synchronized to true. The remove_activation will 844 // check this flag. 845 846 const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread); 847 NOT_LP64(__ get_thread(thread1)); 848 const Address do_not_unlock_if_synchronized(thread1, 849 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 850 __ movbool(do_not_unlock_if_synchronized, true); 851 852 // increment invocation count & check for overflow 853 Label invocation_counter_overflow; 854 if (inc_counter) { 855 generate_counter_incr(&invocation_counter_overflow); 856 } 857 858 Label continue_after_compile; 859 __ bind(continue_after_compile); 860 861 bang_stack_shadow_pages(true); 862 863 // reset the _do_not_unlock_if_synchronized flag 864 NOT_LP64(__ get_thread(thread1)); 865 __ movbool(do_not_unlock_if_synchronized, false); 866 867 // check for synchronized methods 868 // Must happen AFTER invocation_counter check and stack overflow check, 869 // so method is not locked if overflows. 870 if (synchronized) { 871 lock_method(); 872 } else { 873 // no synchronization necessary 874 #ifdef ASSERT 875 { 876 Label L; 877 __ movl(rax, access_flags); 878 __ testl(rax, JVM_ACC_SYNCHRONIZED); 879 __ jcc(Assembler::zero, L); 880 __ stop("method needs synchronization"); 881 __ bind(L); 882 } 883 #endif 884 } 885 886 // start execution 887 #ifdef ASSERT 888 { 889 Label L; 890 const Address monitor_block_top(rbp, 891 frame::interpreter_frame_monitor_block_top_offset * wordSize); 892 __ movptr(rax, monitor_block_top); 893 __ cmpptr(rax, rsp); 894 __ jcc(Assembler::equal, L); 895 __ stop("broken stack frame setup in interpreter"); 896 __ bind(L); 897 } 898 #endif 899 900 // jvmti support 901 __ notify_method_entry(); 902 903 // work registers 904 const Register method = rbx; 905 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); 906 const Register t = NOT_LP64(rcx) LP64_ONLY(r11); 907 908 // allocate space for parameters 909 __ get_method(method); 910 __ movptr(t, Address(method, Method::const_offset())); 911 __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset())); 912 913 #ifndef _LP64 914 __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes. 915 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror 916 __ subptr(rsp, t); 917 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics 918 #else 919 __ shll(t, Interpreter::logStackElementSize); 920 921 __ subptr(rsp, t); 922 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 923 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) 924 #endif // _LP64 925 926 // get signature handler 927 { 928 Label L; 929 __ movptr(t, Address(method, Method::signature_handler_offset())); 930 __ testptr(t, t); 931 __ jcc(Assembler::notZero, L); 932 __ call_VM(noreg, 933 CAST_FROM_FN_PTR(address, 934 InterpreterRuntime::prepare_native_call), 935 method); 936 __ get_method(method); 937 __ movptr(t, Address(method, Method::signature_handler_offset())); 938 __ bind(L); 939 } 940 941 // call signature handler 942 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals, 943 "adjust this code"); 944 assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp, 945 "adjust this code"); 946 assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1), 947 "adjust this code"); 948 949 // The generated handlers do not touch RBX (the method). 950 // However, large signatures cannot be cached and are generated 951 // each time here. The slow-path generator can do a GC on return, 952 // so we must reload it after the call. 953 __ call(t); 954 __ get_method(method); // slow path can do a GC, reload RBX 955 956 957 // result handler is in rax 958 // set result handler 959 __ movptr(Address(rbp, 960 (frame::interpreter_frame_result_handler_offset) * wordSize), 961 rax); 962 963 // pass mirror handle if static call 964 { 965 Label L; 966 __ movl(t, Address(method, Method::access_flags_offset())); 967 __ testl(t, JVM_ACC_STATIC); 968 __ jcc(Assembler::zero, L); 969 // get mirror 970 __ load_mirror(t, method, rax); 971 // copy mirror into activation frame 972 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), 973 t); 974 // pass handle to mirror 975 #ifndef _LP64 976 __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); 977 __ movptr(Address(rsp, wordSize), t); 978 #else 979 __ lea(c_rarg1, 980 Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); 981 #endif // _LP64 982 __ bind(L); 983 } 984 985 // get native function entry point 986 { 987 Label L; 988 __ movptr(rax, Address(method, Method::native_function_offset())); 989 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); 990 __ cmpptr(rax, unsatisfied.addr()); 991 __ jcc(Assembler::notEqual, L); 992 __ call_VM(noreg, 993 CAST_FROM_FN_PTR(address, 994 InterpreterRuntime::prepare_native_call), 995 method); 996 __ get_method(method); 997 __ movptr(rax, Address(method, Method::native_function_offset())); 998 __ bind(L); 999 } 1000 1001 // pass JNIEnv 1002 #ifndef _LP64 1003 __ get_thread(thread); 1004 __ lea(t, Address(thread, JavaThread::jni_environment_offset())); 1005 __ movptr(Address(rsp, 0), t); 1006 1007 // set_last_Java_frame_before_call 1008 // It is enough that the pc() 1009 // points into the right code segment. It does not have to be the correct return pc. 1010 __ set_last_Java_frame(thread, noreg, rbp, __ pc()); 1011 #else 1012 __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset())); 1013 1014 // It is enough that the pc() points into the right code 1015 // segment. It does not have to be the correct return pc. 1016 __ set_last_Java_frame(rsp, rbp, (address) __ pc()); 1017 #endif // _LP64 1018 1019 // change thread state 1020 #ifdef ASSERT 1021 { 1022 Label L; 1023 __ movl(t, Address(thread, JavaThread::thread_state_offset())); 1024 __ cmpl(t, _thread_in_Java); 1025 __ jcc(Assembler::equal, L); 1026 __ stop("Wrong thread state in native stub"); 1027 __ bind(L); 1028 } 1029 #endif 1030 1031 // Change state to native 1032 1033 __ movl(Address(thread, JavaThread::thread_state_offset()), 1034 _thread_in_native); 1035 1036 // Call the native method. 1037 __ call(rax); 1038 // 32: result potentially in rdx:rax or ST0 1039 // 64: result potentially in rax or xmm0 1040 1041 // Verify or restore cpu control state after JNI call 1042 __ restore_cpu_control_state_after_jni(); 1043 1044 // NOTE: The order of these pushes is known to frame::interpreter_frame_result 1045 // in order to extract the result of a method call. If the order of these 1046 // pushes change or anything else is added to the stack then the code in 1047 // interpreter_frame_result must also change. 1048 1049 #ifndef _LP64 1050 // save potential result in ST(0) & rdx:rax 1051 // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 - 1052 // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers) 1053 // It is safe to do this push because state is _thread_in_native and return address will be found 1054 // via _last_native_pc and not via _last_jave_sp 1055 1056 // NOTE: the order of theses push(es) is known to frame::interpreter_frame_result. 1057 // If the order changes or anything else is added to the stack the code in 1058 // interpreter_frame_result will have to be changed. 1059 1060 { Label L; 1061 Label push_double; 1062 ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT)); 1063 ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE)); 1064 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize), 1065 float_handler.addr()); 1066 __ jcc(Assembler::equal, push_double); 1067 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize), 1068 double_handler.addr()); 1069 __ jcc(Assembler::notEqual, L); 1070 __ bind(push_double); 1071 __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0). 1072 __ bind(L); 1073 } 1074 #else 1075 __ push(dtos); 1076 #endif // _LP64 1077 1078 __ push(ltos); 1079 1080 // change thread state 1081 NOT_LP64(__ get_thread(thread)); 1082 __ movl(Address(thread, JavaThread::thread_state_offset()), 1083 _thread_in_native_trans); 1084 1085 // Force this write out before the read below 1086 __ membar(Assembler::Membar_mask_bits( 1087 Assembler::LoadLoad | Assembler::LoadStore | 1088 Assembler::StoreLoad | Assembler::StoreStore)); 1089 1090 #ifndef _LP64 1091 if (AlwaysRestoreFPU) { 1092 // Make sure the control word is correct. 1093 __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std())); 1094 } 1095 #endif // _LP64 1096 1097 // check for safepoint operation in progress and/or pending suspend requests 1098 { 1099 Label Continue; 1100 Label slow_path; 1101 1102 __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */); 1103 1104 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0); 1105 __ jcc(Assembler::equal, Continue); 1106 __ bind(slow_path); 1107 1108 // Don't use call_VM as it will see a possible pending exception 1109 // and forward it and never return here preventing us from 1110 // clearing _last_native_pc down below. Also can't use 1111 // call_VM_leaf either as it will check to see if r13 & r14 are 1112 // preserved and correspond to the bcp/locals pointers. So we do a 1113 // runtime call by hand. 1114 // 1115 #ifndef _LP64 1116 __ push(thread); 1117 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, 1118 JavaThread::check_special_condition_for_native_trans))); 1119 __ increment(rsp, wordSize); 1120 __ get_thread(thread); 1121 #else 1122 __ mov(c_rarg0, r15_thread); 1123 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1124 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1125 __ andptr(rsp, -16); // align stack as required by ABI 1126 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); 1127 __ mov(rsp, r12); // restore sp 1128 __ reinit_heapbase(); 1129 #endif // _LP64 1130 __ bind(Continue); 1131 } 1132 1133 // change thread state 1134 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java); 1135 1136 // reset_last_Java_frame 1137 __ reset_last_Java_frame(thread, true); 1138 1139 if (CheckJNICalls) { 1140 // clear_pending_jni_exception_check 1141 __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD); 1142 } 1143 1144 // reset handle block 1145 __ movptr(t, Address(thread, JavaThread::active_handles_offset())); 1146 __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); 1147 1148 // If result is an oop unbox and store it in frame where gc will see it 1149 // and result handler will pick it up 1150 1151 { 1152 Label no_oop; 1153 __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT))); 1154 __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); 1155 __ jcc(Assembler::notEqual, no_oop); 1156 // retrieve result 1157 __ pop(ltos); 1158 // Unbox oop result, e.g. JNIHandles::resolve value. 1159 __ resolve_jobject(rax /* value */, 1160 thread /* thread */, 1161 t /* tmp */); 1162 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax); 1163 // keep stack depth as expected by pushing oop which will eventually be discarded 1164 __ push(ltos); 1165 __ bind(no_oop); 1166 } 1167 1168 1169 { 1170 Label no_reguard; 1171 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), 1172 StackOverflow::stack_guard_yellow_reserved_disabled); 1173 __ jcc(Assembler::notEqual, no_reguard); 1174 1175 __ pusha(); // XXX only save smashed registers 1176 #ifndef _LP64 1177 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 1178 __ popa(); 1179 #else 1180 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1181 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1182 __ andptr(rsp, -16); // align stack as required by ABI 1183 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 1184 __ mov(rsp, r12); // restore sp 1185 __ popa(); // XXX only restore smashed registers 1186 __ reinit_heapbase(); 1187 #endif // _LP64 1188 1189 __ bind(no_reguard); 1190 } 1191 1192 1193 // The method register is junk from after the thread_in_native transition 1194 // until here. Also can't call_VM until the bcp has been 1195 // restored. Need bcp for throwing exception below so get it now. 1196 __ get_method(method); 1197 1198 // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base() 1199 __ movptr(rbcp, Address(method, Method::const_offset())); // get ConstMethod* 1200 __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase 1201 1202 // handle exceptions (exception handling will handle unlocking!) 1203 { 1204 Label L; 1205 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); 1206 __ jcc(Assembler::zero, L); 1207 // Note: At some point we may want to unify this with the code 1208 // used in call_VM_base(); i.e., we should use the 1209 // StubRoutines::forward_exception code. For now this doesn't work 1210 // here because the rsp is not correctly set at this point. 1211 __ MacroAssembler::call_VM(noreg, 1212 CAST_FROM_FN_PTR(address, 1213 InterpreterRuntime::throw_pending_exception)); 1214 __ should_not_reach_here(); 1215 __ bind(L); 1216 } 1217 1218 // do unlocking if necessary 1219 { 1220 Label L; 1221 __ movl(t, Address(method, Method::access_flags_offset())); 1222 __ testl(t, JVM_ACC_SYNCHRONIZED); 1223 __ jcc(Assembler::zero, L); 1224 // the code below should be shared with interpreter macro 1225 // assembler implementation 1226 { 1227 Label unlock; 1228 // BasicObjectLock will be first in list, since this is a 1229 // synchronized method. However, need to check that the object 1230 // has not been unlocked by an explicit monitorexit bytecode. 1231 const Address monitor(rbp, 1232 (intptr_t)(frame::interpreter_frame_initial_sp_offset * 1233 wordSize - (int)sizeof(BasicObjectLock))); 1234 1235 const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1); 1236 1237 // monitor expect in c_rarg1 for slow unlock path 1238 __ lea(regmon, monitor); // address of first monitor 1239 1240 __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes())); 1241 __ testptr(t, t); 1242 __ jcc(Assembler::notZero, unlock); 1243 1244 // Entry already unlocked, need to throw exception 1245 __ MacroAssembler::call_VM(noreg, 1246 CAST_FROM_FN_PTR(address, 1247 InterpreterRuntime::throw_illegal_monitor_state_exception)); 1248 __ should_not_reach_here(); 1249 1250 __ bind(unlock); 1251 __ unlock_object(regmon); 1252 } 1253 __ bind(L); 1254 } 1255 1256 // jvmti support 1257 // Note: This must happen _after_ handling/throwing any exceptions since 1258 // the exception handler code notifies the runtime of method exits 1259 // too. If this happens before, method entry/exit notifications are 1260 // not properly paired (was bug - gri 11/22/99). 1261 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI); 1262 1263 // restore potential result in edx:eax, call result handler to 1264 // restore potential result in ST0 & handle result 1265 1266 __ pop(ltos); 1267 LP64_ONLY( __ pop(dtos)); 1268 1269 __ movptr(t, Address(rbp, 1270 (frame::interpreter_frame_result_handler_offset) * wordSize)); 1271 __ call(t); 1272 1273 // remove activation 1274 __ movptr(t, Address(rbp, 1275 frame::interpreter_frame_sender_sp_offset * 1276 wordSize)); // get sender sp 1277 __ leave(); // remove frame anchor 1278 __ pop(rdi); // get return address 1279 __ mov(rsp, t); // set sp to sender sp 1280 __ jmp(rdi); 1281 1282 if (inc_counter) { 1283 // Handle overflow of counter and compile method 1284 __ bind(invocation_counter_overflow); 1285 generate_counter_overflow(continue_after_compile); 1286 } 1287 1288 return entry_point; 1289 } 1290 1291 // Abstract method entry 1292 // Attempt to execute abstract method. Throw exception 1293 address TemplateInterpreterGenerator::generate_abstract_entry(void) { 1294 1295 address entry_point = __ pc(); 1296 1297 // abstract method entry 1298 1299 // pop return address, reset last_sp to NULL 1300 __ empty_expression_stack(); 1301 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed) 1302 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) 1303 1304 // throw exception 1305 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx); 1306 // the call_VM checks for exception, so we should never return here. 1307 __ should_not_reach_here(); 1308 1309 return entry_point; 1310 } 1311 1312 // 1313 // Generic interpreted method entry to (asm) interpreter 1314 // 1315 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { 1316 // determine code generation flags 1317 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; 1318 1319 // ebx: Method* 1320 // rbcp: sender sp 1321 address entry_point = __ pc(); 1322 1323 const Address constMethod(rbx, Method::const_offset()); 1324 const Address access_flags(rbx, Method::access_flags_offset()); 1325 const Address size_of_parameters(rdx, 1326 ConstMethod::size_of_parameters_offset()); 1327 const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset()); 1328 1329 1330 // get parameter size (always needed) 1331 __ movptr(rdx, constMethod); 1332 __ load_unsigned_short(rcx, size_of_parameters); 1333 1334 // rbx: Method* 1335 // rcx: size of parameters 1336 // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i ) 1337 1338 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words 1339 __ subl(rdx, rcx); // rdx = no. of additional locals 1340 1341 // YYY 1342 // __ incrementl(rdx); 1343 // __ andl(rdx, -2); 1344 1345 // see if we've got enough room on the stack for locals plus overhead. 1346 generate_stack_overflow_check(); 1347 1348 // get return address 1349 __ pop(rax); 1350 1351 // compute beginning of parameters 1352 __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); 1353 1354 // rdx - # of additional locals 1355 // allocate space for locals 1356 // explicitly initialize locals 1357 { 1358 Label exit, loop; 1359 __ testl(rdx, rdx); 1360 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 1361 __ bind(loop); 1362 __ push((int) NULL_WORD); // initialize local variables 1363 __ decrementl(rdx); // until everything initialized 1364 __ jcc(Assembler::greater, loop); 1365 __ bind(exit); 1366 } 1367 1368 // initialize fixed part of activation frame 1369 generate_fixed_frame(false); 1370 1371 // make sure method is not native & not abstract 1372 #ifdef ASSERT 1373 __ movl(rax, access_flags); 1374 { 1375 Label L; 1376 __ testl(rax, JVM_ACC_NATIVE); 1377 __ jcc(Assembler::zero, L); 1378 __ stop("tried to execute native method as non-native"); 1379 __ bind(L); 1380 } 1381 { 1382 Label L; 1383 __ testl(rax, JVM_ACC_ABSTRACT); 1384 __ jcc(Assembler::zero, L); 1385 __ stop("tried to execute abstract method in interpreter"); 1386 __ bind(L); 1387 } 1388 #endif 1389 1390 // Since at this point in the method invocation the exception 1391 // handler would try to exit the monitor of synchronized methods 1392 // which hasn't been entered yet, we set the thread local variable 1393 // _do_not_unlock_if_synchronized to true. The remove_activation 1394 // will check this flag. 1395 1396 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); 1397 NOT_LP64(__ get_thread(thread)); 1398 const Address do_not_unlock_if_synchronized(thread, 1399 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 1400 __ movbool(do_not_unlock_if_synchronized, true); 1401 1402 __ profile_parameters_type(rax, rcx, rdx); 1403 // increment invocation count & check for overflow 1404 Label invocation_counter_overflow; 1405 if (inc_counter) { 1406 generate_counter_incr(&invocation_counter_overflow); 1407 } 1408 1409 Label continue_after_compile; 1410 __ bind(continue_after_compile); 1411 1412 // check for synchronized interpreted methods 1413 bang_stack_shadow_pages(false); 1414 1415 // reset the _do_not_unlock_if_synchronized flag 1416 NOT_LP64(__ get_thread(thread)); 1417 __ movbool(do_not_unlock_if_synchronized, false); 1418 1419 // check for synchronized methods 1420 // Must happen AFTER invocation_counter check and stack overflow check, 1421 // so method is not locked if overflows. 1422 if (synchronized) { 1423 // Allocate monitor and lock method 1424 lock_method(); 1425 } else { 1426 // no synchronization necessary 1427 #ifdef ASSERT 1428 { 1429 Label L; 1430 __ movl(rax, access_flags); 1431 __ testl(rax, JVM_ACC_SYNCHRONIZED); 1432 __ jcc(Assembler::zero, L); 1433 __ stop("method needs synchronization"); 1434 __ bind(L); 1435 } 1436 #endif 1437 } 1438 1439 // start execution 1440 #ifdef ASSERT 1441 { 1442 Label L; 1443 const Address monitor_block_top (rbp, 1444 frame::interpreter_frame_monitor_block_top_offset * wordSize); 1445 __ movptr(rax, monitor_block_top); 1446 __ cmpptr(rax, rsp); 1447 __ jcc(Assembler::equal, L); 1448 __ stop("broken stack frame setup in interpreter"); 1449 __ bind(L); 1450 } 1451 #endif 1452 1453 // jvmti support 1454 __ notify_method_entry(); 1455 1456 __ dispatch_next(vtos); 1457 1458 // invocation counter overflow 1459 if (inc_counter) { 1460 // Handle overflow of counter and compile method 1461 __ bind(invocation_counter_overflow); 1462 generate_counter_overflow(continue_after_compile); 1463 } 1464 1465 return entry_point; 1466 } 1467 1468 //----------------------------------------------------------------------------- 1469 // Exceptions 1470 1471 void TemplateInterpreterGenerator::generate_throw_exception() { 1472 // Entry point in previous activation (i.e., if the caller was 1473 // interpreted) 1474 Interpreter::_rethrow_exception_entry = __ pc(); 1475 // Restore sp to interpreter_frame_last_sp even though we are going 1476 // to empty the expression stack for the exception processing. 1477 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 1478 // rax: exception 1479 // rdx: return address/pc that threw exception 1480 __ restore_bcp(); // r13/rsi points to call/send 1481 __ restore_locals(); 1482 LP64_ONLY(__ reinit_heapbase()); // restore r12 as heapbase. 1483 // Entry point for exceptions thrown within interpreter code 1484 Interpreter::_throw_exception_entry = __ pc(); 1485 // expression stack is undefined here 1486 // rax: exception 1487 // r13/rsi: exception bcp 1488 __ verify_oop(rax); 1489 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); 1490 LP64_ONLY(__ mov(c_rarg1, rax)); 1491 1492 // expression stack must be empty before entering the VM in case of 1493 // an exception 1494 __ empty_expression_stack(); 1495 // find exception handler address and preserve exception oop 1496 __ call_VM(rdx, 1497 CAST_FROM_FN_PTR(address, 1498 InterpreterRuntime::exception_handler_for_exception), 1499 rarg); 1500 // rax: exception handler entry point 1501 // rdx: preserved exception oop 1502 // r13/rsi: bcp for exception handler 1503 __ push_ptr(rdx); // push exception which is now the only value on the stack 1504 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!) 1505 1506 // If the exception is not handled in the current frame the frame is 1507 // removed and the exception is rethrown (i.e. exception 1508 // continuation is _rethrow_exception). 1509 // 1510 // Note: At this point the bci is still the bxi for the instruction 1511 // which caused the exception and the expression stack is 1512 // empty. Thus, for any VM calls at this point, GC will find a legal 1513 // oop map (with empty expression stack). 1514 1515 // In current activation 1516 // tos: exception 1517 // esi: exception bcp 1518 1519 // 1520 // JVMTI PopFrame support 1521 // 1522 1523 Interpreter::_remove_activation_preserving_args_entry = __ pc(); 1524 __ empty_expression_stack(); 1525 // Set the popframe_processing bit in pending_popframe_condition 1526 // indicating that we are currently handling popframe, so that 1527 // call_VMs that may happen later do not trigger new popframe 1528 // handling cycles. 1529 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread); 1530 NOT_LP64(__ get_thread(thread)); 1531 __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset())); 1532 __ orl(rdx, JavaThread::popframe_processing_bit); 1533 __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx); 1534 1535 { 1536 // Check to see whether we are returning to a deoptimized frame. 1537 // (The PopFrame call ensures that the caller of the popped frame is 1538 // either interpreted or compiled and deoptimizes it if compiled.) 1539 // In this case, we can't call dispatch_next() after the frame is 1540 // popped, but instead must save the incoming arguments and restore 1541 // them after deoptimization has occurred. 1542 // 1543 // Note that we don't compare the return PC against the 1544 // deoptimization blob's unpack entry because of the presence of 1545 // adapter frames in C2. 1546 Label caller_not_deoptimized; 1547 Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1); 1548 __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize)); 1549 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, 1550 InterpreterRuntime::interpreter_contains), rarg); 1551 __ testl(rax, rax); 1552 __ jcc(Assembler::notZero, caller_not_deoptimized); 1553 1554 // Compute size of arguments for saving when returning to 1555 // deoptimized caller 1556 __ get_method(rax); 1557 __ movptr(rax, Address(rax, Method::const_offset())); 1558 __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod:: 1559 size_of_parameters_offset()))); 1560 __ shll(rax, Interpreter::logStackElementSize); 1561 __ restore_locals(); 1562 __ subptr(rlocals, rax); 1563 __ addptr(rlocals, wordSize); 1564 // Save these arguments 1565 NOT_LP64(__ get_thread(thread)); 1566 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, 1567 Deoptimization:: 1568 popframe_preserve_args), 1569 thread, rax, rlocals); 1570 1571 __ remove_activation(vtos, rdx, 1572 /* throw_monitor_exception */ false, 1573 /* install_monitor_exception */ false, 1574 /* notify_jvmdi */ false); 1575 1576 // Inform deoptimization that it is responsible for restoring 1577 // these arguments 1578 NOT_LP64(__ get_thread(thread)); 1579 __ movl(Address(thread, JavaThread::popframe_condition_offset()), 1580 JavaThread::popframe_force_deopt_reexecution_bit); 1581 1582 // Continue in deoptimization handler 1583 __ jmp(rdx); 1584 1585 __ bind(caller_not_deoptimized); 1586 } 1587 1588 __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */ 1589 /* throw_monitor_exception */ false, 1590 /* install_monitor_exception */ false, 1591 /* notify_jvmdi */ false); 1592 1593 // Finish with popframe handling 1594 // A previous I2C followed by a deoptimization might have moved the 1595 // outgoing arguments further up the stack. PopFrame expects the 1596 // mutations to those outgoing arguments to be preserved and other 1597 // constraints basically require this frame to look exactly as 1598 // though it had previously invoked an interpreted activation with 1599 // no space between the top of the expression stack (current 1600 // last_sp) and the top of stack. Rather than force deopt to 1601 // maintain this kind of invariant all the time we call a small 1602 // fixup routine to move the mutated arguments onto the top of our 1603 // expression stack if necessary. 1604 #ifndef _LP64 1605 __ mov(rax, rsp); 1606 __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 1607 __ get_thread(thread); 1608 // PC must point into interpreter here 1609 __ set_last_Java_frame(thread, noreg, rbp, __ pc()); 1610 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx); 1611 __ get_thread(thread); 1612 #else 1613 __ mov(c_rarg1, rsp); 1614 __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 1615 // PC must point into interpreter here 1616 __ set_last_Java_frame(noreg, rbp, __ pc()); 1617 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2); 1618 #endif 1619 __ reset_last_Java_frame(thread, true); 1620 1621 // Restore the last_sp and null it out 1622 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 1623 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 1624 1625 __ restore_bcp(); 1626 __ restore_locals(); 1627 // The method data pointer was incremented already during 1628 // call profiling. We have to restore the mdp for the current bcp. 1629 if (ProfileInterpreter) { 1630 __ set_method_data_pointer_for_bcp(); 1631 } 1632 1633 // Clear the popframe condition flag 1634 NOT_LP64(__ get_thread(thread)); 1635 __ movl(Address(thread, JavaThread::popframe_condition_offset()), 1636 JavaThread::popframe_inactive); 1637 1638 #if INCLUDE_JVMTI 1639 { 1640 Label L_done; 1641 const Register local0 = rlocals; 1642 1643 __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic); 1644 __ jcc(Assembler::notEqual, L_done); 1645 1646 // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. 1647 // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL. 1648 1649 __ get_method(rdx); 1650 __ movptr(rax, Address(local0, 0)); 1651 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp); 1652 1653 __ testptr(rax, rax); 1654 __ jcc(Assembler::zero, L_done); 1655 1656 __ movptr(Address(rbx, 0), rax); 1657 __ bind(L_done); 1658 } 1659 #endif // INCLUDE_JVMTI 1660 1661 __ dispatch_next(vtos); 1662 // end of PopFrame support 1663 1664 Interpreter::_remove_activation_entry = __ pc(); 1665 1666 // preserve exception over this code sequence 1667 __ pop_ptr(rax); 1668 NOT_LP64(__ get_thread(thread)); 1669 __ movptr(Address(thread, JavaThread::vm_result_offset()), rax); 1670 // remove the activation (without doing throws on illegalMonitorExceptions) 1671 __ remove_activation(vtos, rdx, false, true, false); 1672 // restore exception 1673 NOT_LP64(__ get_thread(thread)); 1674 __ get_vm_result(rax, thread); 1675 1676 // In between activations - previous activation type unknown yet 1677 // compute continuation point - the continuation point expects the 1678 // following registers set up: 1679 // 1680 // rax: exception 1681 // rdx: return address/pc that threw exception 1682 // rsp: expression stack of caller 1683 // rbp: ebp of caller 1684 __ push(rax); // save exception 1685 __ push(rdx); // save return address 1686 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, 1687 SharedRuntime::exception_handler_for_return_address), 1688 thread, rdx); 1689 __ mov(rbx, rax); // save exception handler 1690 __ pop(rdx); // restore return address 1691 __ pop(rax); // restore exception 1692 // Note that an "issuing PC" is actually the next PC after the call 1693 __ jmp(rbx); // jump to exception 1694 // handler of caller 1695 } 1696 1697 1698 // 1699 // JVMTI ForceEarlyReturn support 1700 // 1701 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { 1702 address entry = __ pc(); 1703 1704 __ restore_bcp(); 1705 __ restore_locals(); 1706 __ empty_expression_stack(); 1707 __ load_earlyret_value(state); // 32 bits returns value in rdx, so don't reuse 1708 1709 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread); 1710 NOT_LP64(__ get_thread(thread)); 1711 __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset())); 1712 Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset()); 1713 1714 // Clear the earlyret state 1715 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive); 1716 1717 __ remove_activation(state, rsi, 1718 false, /* throw_monitor_exception */ 1719 false, /* install_monitor_exception */ 1720 true); /* notify_jvmdi */ 1721 __ jmp(rsi); 1722 1723 return entry; 1724 } // end of ForceEarlyReturn support 1725 1726 1727 //----------------------------------------------------------------------------- 1728 // Helper for vtos entry point generation 1729 1730 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, 1731 address& bep, 1732 address& cep, 1733 address& sep, 1734 address& aep, 1735 address& iep, 1736 address& lep, 1737 address& fep, 1738 address& dep, 1739 address& vep) { 1740 assert(t->is_valid() && t->tos_in() == vtos, "illegal template"); 1741 Label L; 1742 #ifndef _LP64 1743 fep = __ pc(); // ftos entry point 1744 __ push(ftos); 1745 __ jmpb(L); 1746 dep = __ pc(); // dtos entry point 1747 __ push(dtos); 1748 __ jmpb(L); 1749 #else 1750 fep = __ pc(); // ftos entry point 1751 __ push_f(xmm0); 1752 __ jmpb(L); 1753 dep = __ pc(); // dtos entry point 1754 __ push_d(xmm0); 1755 __ jmpb(L); 1756 #endif // _LP64 1757 lep = __ pc(); // ltos entry point 1758 __ push_l(); 1759 __ jmpb(L); 1760 aep = bep = cep = sep = iep = __ pc(); // [abcsi]tos entry point 1761 __ push_i_or_ptr(); 1762 vep = __ pc(); // vtos entry point 1763 __ bind(L); 1764 generate_and_dispatch(t); 1765 } 1766 1767 //----------------------------------------------------------------------------- 1768 1769 // Non-product code 1770 #ifndef PRODUCT 1771 1772 address TemplateInterpreterGenerator::generate_trace_code(TosState state) { 1773 address entry = __ pc(); 1774 1775 #ifndef _LP64 1776 // prepare expression stack 1777 __ pop(rcx); // pop return address so expression stack is 'pure' 1778 __ push(state); // save tosca 1779 1780 // pass tosca registers as arguments & call tracer 1781 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx); 1782 __ mov(rcx, rax); // make sure return address is not destroyed by pop(state) 1783 __ pop(state); // restore tosca 1784 1785 // return 1786 __ jmp(rcx); 1787 #else 1788 __ push(state); 1789 __ push(c_rarg0); 1790 __ push(c_rarg1); 1791 __ push(c_rarg2); 1792 __ push(c_rarg3); 1793 __ mov(c_rarg2, rax); // Pass itos 1794 #ifdef _WIN64 1795 __ movflt(xmm3, xmm0); // Pass ftos 1796 #endif 1797 __ call_VM(noreg, 1798 CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), 1799 c_rarg1, c_rarg2, c_rarg3); 1800 __ pop(c_rarg3); 1801 __ pop(c_rarg2); 1802 __ pop(c_rarg1); 1803 __ pop(c_rarg0); 1804 __ pop(state); 1805 __ ret(0); // return from result handler 1806 #endif // _LP64 1807 1808 return entry; 1809 } 1810 1811 void TemplateInterpreterGenerator::count_bytecode() { 1812 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value)); 1813 } 1814 1815 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { 1816 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()])); 1817 } 1818 1819 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { 1820 __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index)); 1821 __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes); 1822 __ orl(rbx, 1823 ((int) t->bytecode()) << 1824 BytecodePairHistogram::log2_number_of_codes); 1825 __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx); 1826 __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters)); 1827 __ incrementl(Address(rscratch1, rbx, Address::times_4)); 1828 } 1829 1830 1831 void TemplateInterpreterGenerator::trace_bytecode(Template* t) { 1832 // Call a little run-time stub to avoid blow-up for each bytecode. 1833 // The run-time runtime saves the right registers, depending on 1834 // the tosca in-state for the given template. 1835 1836 assert(Interpreter::trace_code(t->tos_in()) != NULL, 1837 "entry must have been generated"); 1838 #ifndef _LP64 1839 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in()))); 1840 #else 1841 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1842 __ andptr(rsp, -16); // align stack as required by ABI 1843 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in()))); 1844 __ mov(rsp, r12); // restore sp 1845 __ reinit_heapbase(); 1846 #endif // _LP64 1847 } 1848 1849 1850 void TemplateInterpreterGenerator::stop_interpreter_at() { 1851 Label L; 1852 __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value), 1853 StopInterpreterAt); 1854 __ jcc(Assembler::notEqual, L); 1855 __ int3(); 1856 __ bind(L); 1857 } 1858 #endif // !PRODUCT