/*
 * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/inlineKlass.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/continuation.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(280) NOT_JVMCI(268) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // The expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Setup parameters.
  // Convention: the aberrant index is expected in register ebx/rbx.
  // Pass the array as well, to create a more detailed exception message.
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg  = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  if (state == atos && InlineTypeReturnedAsFields) {
    __ store_inline_type_fields_to_buf(NULL);
  }

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method. This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (EnableJVMCI && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jcc(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_PRIMITIVE_OBJECT: // fall through (inline types are handled with oops)
  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                 // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ push_cont_fastpath();
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath();

  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO
  //       depending on whether we're profiling or not.
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ movptr(rax, Address(rbx, Method::method_data_offset()));
    __ testptr(rax, rax);
    __ jccb(Assembler::zero, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                              in_bytes(InvocationCounter::counter_offset()));
    const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, mask, rcx, overflow);
    __ jmp(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(rax,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  __ get_method_counters(rbx, rax, done);
  const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, mask, rcx, overflow);
  __ bind(done);
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this wasn't
// obvious in generate_fixed_frame), so the guard should work for them
// too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi)); // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is the frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx, rscratch2);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. The setup is identical for
// interpreted methods and for native methods, hence the shared code.
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);        // save return address
  __ enter();          // save old & set new rbp
  __ push(rbcp);       // set sender sp
  __ push(NULL_WORD);  // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);        // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx, rscratch2);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);      // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);     // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0); // no bcp
  } else {
    __ push(rbcp); // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
  //   Thus we can use the regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  Label slow_path;
  // rbx: method

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ movptr(rax, Address(rsp, wordSize));

  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // rax: local 0
  // rbx: method (but can be used as scratch now)
  // rdx: scratch
  // rdi: scratch

  // Preserve the sender sp in case the load barrier
  // calls the runtime
  NOT_LP64(__ push(rsi));

  // Load the value of the referent field.
  const Address field_address(rax, referent_offset);
  __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF);

  // _areturn
  const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
  NOT_LP64(__ pop(rsi));  // get sender sp
  __ pop(rdi);            // get return address
  __ mov(rsp, sender_sp); // set sp to sender sp
  __ jmp(rdi);
  __ ret(0);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // See more discussion in stackOverflow.hpp.

  // Note that we do the banging after the frame is set up, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
  const int page_size = os::vm_page_size();
  const int n_shadow_pages = shadow_zone_size / page_size;

  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

#ifdef ASSERT
  Label L_good_limit;
  __ cmpptr(Address(thread, JavaThread::shadow_zone_safe_limit()), NULL_WORD);
  __ jcc(Assembler::notEqual, L_good_limit);
  __ stop("shadow zone safe limit is not initialized");
  __ bind(L_good_limit);

  Label L_good_watermark;
  __ cmpptr(Address(thread, JavaThread::shadow_zone_growth_watermark()), NULL_WORD);
  __ jcc(Assembler::notEqual, L_good_watermark);
  __ stop("shadow zone growth watermark is not initialized");
  __ bind(L_good_watermark);
#endif

  Label L_done;

  __ cmpptr(rsp, Address(thread, JavaThread::shadow_zone_growth_watermark()));
  __ jcc(Assembler::above, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p*page_size);
  }

  // Record the new watermark, but only if the update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ cmpptr(rsp, Address(thread, JavaThread::shadow_zone_safe_limit()));
  __ jccb(Assembler::belowEqual, L_done);
  __ movptr(Address(thread, JavaThread::shadow_zone_growth_watermark()), rsp);

  __ bind(L_done);

#ifndef _LP64
  __ pop(thread);
#endif
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
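//
// A rough overview, summarized from the code below rather than a separate
// spec: load the parameter count, push two extra slots (result handler and
// oop temp), build the fixed frame, bump the invocation counter, bang the
// shadow pages, lock the receiver/mirror if synchronized, run the signature
// handler, transition to _thread_in_native, call the native function,
// transition back with a safepoint poll, unbox/unlock as needed, and
// finally remove the activation.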
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack, the arguments are already on the stack, and we
  // only add a handful of words to it.

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push(NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push(NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
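  // lock_method() allocates a BasicObjectLock at the current stack top and
  // locks either the receiver (local 0) or, for static methods, the class
  // mirror; see lock_method() above for the exact layout.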
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter 5");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
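  // t holds the signature handler entry point here. The handler copies the
  // Java arguments from rlocals into the native ABI locations rooted at rsp
  // (matching the from()/to()/temp() asserts above) and leaves the address
  // of the result handler in rax.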
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method, rax);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr(), rscratch1);
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc() points into the right code segment.
  // It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc(), noreg);
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc(), rscratch1);
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(rscratch1);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
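  // On 64-bit, the push(dtos)/push(ltos) pair below unconditionally saves
  // xmm0 and rax; the result handler invoked later picks whichever matches
  // the method's return type.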

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr(), noreg);
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr(), noreg);
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ membar(Assembler::Membar_mask_bits(
                Assembler::LoadLoad | Assembler::LoadStore |
                Assembler::StoreLoad | Assembler::StoreStore));
  }
#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    Label slow_path;

    __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
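    // (Sequence so far: _thread_in_native_trans was stored above, followed
    // by a fence unless UseSystemMemoryBarrier, then the safepoint/suspend
    // poll; this hand-rolled call services the slow path before the
    // transition back to _thread_in_Java below.)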
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

  // If the result is an oop, unbox it and store it in the frame where the
  // GC will see it and the result handler will pick it up

  {
    Label no_oop;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            StackOverflow::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have a legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
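  // Any pending exception set by the native code or by the VM during the
  // transition is raised here, now that the thread is back in
  // _thread_in_Java and the interpreter frame is walkable again.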
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                 InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();  // remove frame anchor
  __ pop(rdi); // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception.
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();    // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // ebx: Method*
  // rbcp: sender sp (set in InterpreterMacroAssembler::prepare_to_jump_from_interpreted / generate_call_stub)
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push(NULL_WORD); // initialize local variables
    __ decrementl(rdx); // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter 6");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase()); // restore r12 as heapbase.
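// Exception flow, summarized from the code below: _throw_exception_entry
// asks the runtime for a handler and jumps to it; when the handler lives in
// another activation, that target is _remove_activation_entry, which pops
// the current frame and continues at the caller's handler via the
// continuation computed by the shared runtime.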
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax);      // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
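    // InterpreterRuntime::interpreter_contains(pc) returns nonzero iff the
    // return address lies within interpreter code, i.e. the caller is still
    // an interpreted frame and has not been deoptimized.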
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
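  // The fixup call below passes the current top of stack and the saved
  // last_sp so the runtime can slide the mutated outgoing arguments onto
  // the top of the expression stack; the 32-bit and 64-bit paths differ
  // only in how the thread and arguments are passed.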
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc(), noreg);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc(), rscratch1);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call.  Detect such a case in the
    // InterpreterRuntime function and return the member name argument,
    // or NULL.
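    // Explanatory sketch: for a re-executed invokestatic that takes an
    // appended MemberName (e.g. MethodHandle.linkToStatic), the sequence
    // below passes the word at local0, the current method and the bcp to
    // member_name_arg_or_null(), which returns the MemberName oop to write
    // back into that slot, or NULL when nothing needs to be restored.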

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32-bit returns value in rdx, so don't reuse it

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
#ifndef _LP64
  fep = __ pc();  // ftos entry point
  __ push(ftos);
  __ jmpb(L);
  dep = __ pc();  // dtos entry point
  __ push(dtos);
  __ jmpb(L);
#else
  fep = __ pc();  // ftos entry point
  __ push_f(xmm0);
  __ jmpb(L);
  dep = __ pc();  // dtos entry point
  __ push_d(xmm0);
  __ jmpb(L);
#endif // _LP64
  lep = __ pc();  // ltos entry point
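  // Explanatory note: each non-vtos entry point above and below pushes the
  // cached tos value onto the expression stack and then meets at L, so
  // generate_and_dispatch() always starts from a vtos-shaped stack.
  // push_l() spills the long tos cache (rax; on 32-bit the high word is
  // conventionally in rdx).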
  __ push_l();
  __ jmpb(L);
  aep = bep = cep = sep = iep = __ pc();  // [abcsi]tos entry point
  __ push_i_or_ptr();
  vep = __ pc();  // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);    // pop return address so expression stack is 'pure'
  __ push(state); // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
  __ pop(state);    // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);  // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value), rscratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]), rscratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx, rscratch1);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt,
           rscratch1);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
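// Usage note (explanatory): in non-product builds, running with
// -XX:StopInterpreterAt=<n> makes the stop_interpreter_at() code above
// execute int3 once BytecodeCounter::_counter_value reaches <n>, so a
// failing run can be halted at a reproducible bytecode under a debugger.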