/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/continuation.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
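// For example (invocation shown is an assumption; PrintInterpreter is a
// diagnostic flag and may require -XX:+UnlockDiagnosticVMOptions):
//   java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version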
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ movptr(rax, Address(rbp,
                           frame::interpreter_frame_monitor_block_top_offset *
                           wordSize));
    __ lea(rax, Address(rbp, rax, Address::times_ptr));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // The expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Setup parameters.
  // ??? convention: expect aberrant index in register ebx/rbx.
  // Pass array to create more detailed exceptions.
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == nullptr, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(RuntimeAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rcx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(rsp, Address(rbp, rcx, Address::times_ptr));
  // and null it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
    __ lea(rsp, Address(rsp, cache, Interpreter::stackElementScale()));
  } else {
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
    __ lea(rsp, Address(rsp, cache, Interpreter::stackElementScale()));
  }

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}
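// Note on index_size above (informal, for orientation): a four-byte index
// (sizeof(u4)) is what invokedynamic call sites use, so the return entry
// pops the callee's arguments via the ResolvedIndyEntry parameter count;
// the two-byte case covers the other invoke bytecodes via
// ResolvedMethodEntry.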

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // null last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method. This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (EnableJVMCI && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jcc(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ push_cont_fastpath();
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath();

  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}
address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
  if (!Continuations::enabled()) return nullptr;
  address start = __ pc();

  __ restore_bcp();
  __ restore_locals();

  // Get return address before adjusting rsp
  __ movptr(rax, Address(rsp, 0));

  // Restore stack bottom
  __ movptr(rcx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(rsp, Address(rbp, rcx, Address::times_ptr));
  // and null it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ jmp(rax);

  return start;
}


// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ movptr(rax, Address(rbx, Method::method_data_offset()));
    __ testptr(rax, rax);
    __ jccb(Assembler::zero, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                              in_bytes(InvocationCounter::counter_offset()));
    const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, mask, rcx, overflow);
    __ jmp(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(rax,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  __ get_method_counters(rbx, rax, done);
  const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, mask, rcx, overflow);
  __ bind(done);
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (null bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or null if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}
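// Rough sketch of what increment_mask_and_jump does above (informal; see
// InterpreterMacroAssembler for the authoritative version): it bumps the
// counter word and branches to 'overflow' once the masked bits indicate
// the profiling threshold was crossed, roughly
//   counter += InvocationCounter::count_increment;
//   if ((counter & invoke_mask) == 0) goto *overflow;
// The mask is loaded from MethodData/MethodCounters, so the effective
// threshold can differ per tier.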
// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this wasn't
// obvious in generate_fixed_frame), so the guard should work for them
// too.
//
// Args:
//   rdx: number of additional locals this frame needs (what we must check)
//   rbx: Method*
//
// Kills:
//   rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = (int)os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi)); // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
  __ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}
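// Worked example for the fast-path bound above (numbers are illustrative
// assumptions, not fixed by this file): with a 4K page, 8-byte LP64 stack
// elements, and an overhead_size of, say, 128 bytes, any frame needing at
// most (4096 - 128) / 8 = 496 additional local slots skips the explicit
// stack_overflow_limit comparison and relies on the guard pages instead.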
// Allocate monitor and lock method (asm interpreter)
//
// Args:
//   rbx: Method*
//   r14/rdi: locals
//
// Kills:
//   rax
//   c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//   rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

#ifdef ASSERT
  {
    Label L;
    __ load_unsigned_short(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ load_unsigned_short(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx, rscratch2);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is null");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size); // add space for a monitor entry
  __ subptr(monitor_block_top, entry_size / wordSize); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}
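// Informal picture of what lock_method() just did (a sketch; frame_x86.hpp
// remains the authoritative stack layout): one BasicObjectLock (lock word
// plus object pointer) was carved out below the current expression stack
// bottom, monitor_block_top was lowered by its size in words, and the
// synchronization object (receiver or class mirror) was stored into its
// obj slot before lock_object() acquires it.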
// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//   rax: return address
//   rbx: Method*
//   r14/rdi: pointer to locals
//   r13/rsi: sender sp
//   rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);        // save return address
  __ enter();          // save old & set new rbp
  __ push(rbcp);       // set sender sp
  __ push(NULL_WORD);  // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);        // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx, rscratch2);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);      // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset()));
  __ push(rdx); // set constant pool cache

  __ movptr(rax, rlocals);
  __ subptr(rax, rbp);
  __ shrptr(rax, Interpreter::logStackElementSize); // rax = rlocals - fp();
  __ push(rax); // set relativized rlocals, see frame::interpreter_frame_locals()

  if (native_call) {
    __ push(0); // no bcp
  } else {
    __ push(rbcp); // set bcp
  }
  // initialize relativized pointer to expression stack bottom
  __ push(frame::interpreter_frame_initial_sp_offset);
}
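// Resulting fixed frame, highest address first (an informal summary of the
// pushes above; frame_x86.hpp remains the authoritative layout):
//   [ return address ] [ saved rbp ] [ sender sp ] [ last_sp (null) ]
//   [ Method* ] [ mirror ] [ mdp or 0 ] [ cp cache ]
//   [ relativized locals ] [ bcp or 0 ] [ relativized expr stack bottom ]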
// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
  //   Thus we can use the regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  Label slow_path;
  // rbx: method

  // Check if local 0 != null
  // If the receiver is null then it is OK to jump to the slow path.
  __ movptr(rax, Address(rsp, wordSize));

  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // rax: local 0
  // rbx: method (but can be used as scratch now)
  // rdx: scratch
  // rdi: scratch

  // Preserve the sender sp in case the load barrier
  // calls the runtime
  NOT_LP64(__ push(rsi));

  // Load the value of the referent field.
  const Address field_address(rax, referent_offset);
  __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF);

  // _areturn
  const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
  NOT_LP64(__ pop(rsi));      // get sender sp
  __ pop(rdi);                // get return address
  __ mov(rsp, sender_sp);     // set sp to sender sp
  __ jmp(rdi);
  __ ret(0);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // See more discussion in stackOverflow.hpp.

  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
  const int page_size = (int)os::vm_page_size();
  const int n_shadow_pages = shadow_zone_size / page_size;

  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

#ifdef ASSERT
  Label L_good_limit;
  __ cmpptr(Address(thread, JavaThread::shadow_zone_safe_limit()), NULL_WORD);
  __ jcc(Assembler::notEqual, L_good_limit);
  __ stop("shadow zone safe limit is not initialized");
  __ bind(L_good_limit);

  Label L_good_watermark;
  __ cmpptr(Address(thread, JavaThread::shadow_zone_growth_watermark()), NULL_WORD);
  __ jcc(Assembler::notEqual, L_good_watermark);
  __ stop("shadow zone growth watermark is not initialized");
  __ bind(L_good_watermark);
#endif

  Label L_done;

  __ cmpptr(rsp, Address(thread, JavaThread::shadow_zone_growth_watermark()));
  __ jcc(Assembler::above, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p*page_size);
  }

  // Record the new watermark, but only if update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ cmpptr(rsp, Address(thread, JavaThread::shadow_zone_safe_limit()));
  __ jccb(Assembler::belowEqual, L_done);
  __ movptr(Address(thread, JavaThread::shadow_zone_growth_watermark()), rsp);

  __ bind(L_done);

#ifndef _LP64
  __ pop(thread);
#endif
}
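// Illustrative arithmetic for the banging loop above (page and zone sizes
// are platform-dependent assumptions): with 4K pages and an 8-page shadow
// zone, the loop touches rsp-4096, rsp-8192, ..., rsp-32768, forcing the
// OS to commit (or fault on) each page now rather than in the middle of
// native or VM code that runs without the usual stack checks.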
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push(NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push(NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ load_unsigned_short(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ load_unsigned_short(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ lea(rax, Address(rbp, rax, Address::times_ptr));
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter 5");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // runtime upcalls
  if (runtime_upcalls) {
    __ generate_runtime_upcalls_on_method_entry();
  }

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }
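  // (Informal note on the signature handler contract, per the asserts
  // below: the handler reads Java arguments starting at rlocals, writes
  // the native ABI argument area at rsp, and may clobber only the agreed
  // temp register, so the interpreter can invoke it with the frame laid
  // out exactly as above.)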
  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ load_unsigned_short(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method, rax);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr(), rscratch1);
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc(), noreg);
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  // For convenience we use the pc we want to resume to in
  // case of preemption on Object.wait.
  Label native_return;
  __ set_last_Java_frame(rsp, rbp, native_return, rscratch1);
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  __ push_cont_fastpath();

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  __ pop_cont_fastpath();

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(rscratch1);
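  // Thread-state choreography around the JNI call, summarized (informal;
  // the authoritative protocol is the transitions coded above and below):
  // _thread_in_Java -> _thread_in_native before the call, then
  // _thread_in_native_trans afterwards, a memory barrier plus safepoint
  // poll while in the transitional state, and finally _thread_in_Java once
  // any pending safepoint/suspend work has been handled.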

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_result_handler_offset)*wordSize),
              float_handler.addr(), noreg);
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_result_handler_offset)*wordSize),
              double_handler.addr(), noreg);
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ membar(Assembler::Membar_mask_bits(
                Assembler::LoadLoad | Assembler::LoadStore |
                Assembler::StoreLoad | Assembler::StoreStore));
  }
#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    Label slow_path;

    __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

#ifdef _LP64
  if (LockingMode != LM_LEGACY) {
    // Check preemption for Object.wait()
    Label not_preempted;
    __ movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset()));
    __ cmpptr(rscratch1, NULL_WORD);
    __ jccb(Assembler::equal, not_preempted);
    __ movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
    __ jmp(rscratch1);
    __ bind(native_return);
    __ restore_after_resume(true /* is_native */);
    __ bind(not_preempted);
  } else {
    // any pc will do so just use this one for LM_LEGACY to keep code together.
    __ bind(native_return);
  }
#endif // _LP64

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset()), NULL_WORD);

  // If the result is an oop, unbox it and store it in the frame where the
  // GC will see it and the result handler will pick it up

  {
    Label no_oop;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
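    // (Informally: a JNI native returns object results as a jobject
    // handle; resolve_jobject dereferences that handle -- honoring local,
    // global, and weak-global kinds -- to recover the raw oop before it is
    // stashed in the frame's oop temp slot below.)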
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            StackOverflow::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ load_unsigned_short(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor must be in c_rarg1 for the slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                 InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();  // remove frame anchor
  __ pop(rdi); // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to null
  __ empty_expression_stack();
  __ restore_bcp();    // rsi must be correct for exception handler (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = (UseCompiler || CountCompiledCalls) && !PreloadOnly;

  // ebx: Method*
  // rbcp: sender sp (set in InterpreterMacroAssembler::prepare_to_jump_from_interpreted / generate_call_stub)
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push(NULL_WORD); // initialize local variables
    __ decrementl(rdx); // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ load_unsigned_short(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ load_unsigned_short(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ lea(rax, Address(rbp, rax, Address::times_ptr));
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter 6");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  if (runtime_upcalls) {
    __ generate_runtime_upcalls_on_method_entry();
  }

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}
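// (Informal contrast with generate_native_entry above: the normal entry
// must size-check and zero-initialize the additional locals and keeps rbcp
// live for bytecode dispatch, whereas the native entry has no expression
// stack or extra locals and instead builds the out-of-line JNI argument
// area. Both share generate_fixed_frame and the counter/banging protocol.)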
//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase()); // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
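    // (Informal note: InterpreterRuntime::interpreter_contains(pc) answers
    // whether the caller's return pc lies within interpreter-generated
    // code; a zero result below therefore means the caller is not
    // interpreted and has been deoptimized, so its arguments must be
    // preserved across the deoptimization instead of re-dispatching.)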
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling.
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved, and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time, we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
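  // The fixup routine receives the current top of stack and the address that
  // last_sp resolves to; everything between the two is the (possibly
  // mutated) outgoing argument area that may need to be copied down.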
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(rbx, Address(rbp, rbx, Address::times_ptr));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc(), noreg);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(c_rarg2, Address(rbp, c_rarg2, Address::times_ptr));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc(), rscratch1);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rcx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(rsp, Address(rbp, rcx, Address::times_ptr));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or null.
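    // The upcall below passes the current value of local 0, the Method*,
    // and the bcp; if it returns a non-null MemberName, that value is
    // written back into local 0 before the bytecode is re-executed.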
    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32-bit returns the value in rdx, so don't reuse it

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
#ifndef _LP64
  fep = __ pc();     // ftos entry point
  __ push(ftos);
  __ jmpb(L);
  dep = __ pc();     // dtos entry point
  __ push(dtos);
  __ jmpb(L);
#else
  fep = __ pc();     // ftos entry point
  __ push_f(xmm0);
  __ jmpb(L);
  dep = __ pc();     // dtos entry point
  __ push_d(xmm0);
  __ jmpb(L);
#endif // _LP64
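  // Each tos-state entry point above (and the ltos/itos ones below) flushes
  // the value held in the tos cache register onto the expression stack and
  // joins the common vtos path at L, so the template body only has to be
  // generated once, for vtos-in.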
  lep = __ pc();     // ltos entry point
  __ push_l();
  __ jmpb(L);
  aep = bep = cep = sep = iep = __ pc();      // [abcsi]tos entry point
  __ push_i_or_ptr();
  vep = __ pc();     // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

void TemplateInterpreterGenerator::count_bytecode() {
#ifdef _LP64
  __ incrementq(ExternalAddress((address) &BytecodeCounter::_counter_value), rscratch1);
#else
  Unimplemented();
#endif
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]), rscratch1);
}

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);          // pop return address so expression stack is 'pure'
  __ push(state);       // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
  __ pop(state);        // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);             // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // The pair index is built by shifting the previous bytecode out of the
  // low bits and or-ing in the current bytecode:
  //   _index = (_index >> log2_number_of_codes) |
  //            (bytecode << log2_number_of_codes)
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx, rscratch1);
  // bump the counter for the resulting pair
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
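  // (The stub called here is the per-tos-state entry produced by
  // generate_trace_code() above and recorded as Interpreter::trace_code().)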
  assert(Interpreter::trace_code(t->tos_in()) != nullptr,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


// Break into the debugger (int3) once the global bytecode counter
// reaches StopInterpreterAt.
void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt,
           rscratch1);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT