/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/continuation.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ movptr(rax, Address(rbp,
                           frame::interpreter_frame_monitor_block_top_offset *
                           wordSize));
    __ lea(rax, Address(rbp, rax, Address::times_ptr));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // The expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Setup parameters.
  // ??? convention: expect aberrant index in register ebx/rbx.
  // Pass array to create more detailed exceptions.
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == nullptr, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(RuntimeAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64
  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rcx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(rsp, Address(rbp, rcx, Address::times_ptr));
  // and null it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
    __ lea(rsp, Address(rsp, cache, Interpreter::stackElementScale()));
  } else {
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
    __ lea(rsp, Address(rsp, cache, Interpreter::stackElementScale()));
  }

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}

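// Note on the rsp adjustment in generate_return_entry_for above: the caller
// pushed the callee's arguments onto the expression stack, so the return
// entry pops them by advancing rsp by the parameter count cached in the
// resolved indy/method entry (the callee's Method* is no longer available
// here, which is presumably why the cached count is used instead).
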
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // null last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (EnableJVMCI && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jcc(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == nullptr) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2 ) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                            // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ push_cont_fastpath();
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath();

  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
  if (!Continuations::enabled()) return nullptr;
  address start = __ pc();

  __ restore_bcp();
  __ restore_locals();

  // Get return address before adjusting rsp
  __ movptr(rax, Address(rsp, 0));

  // Restore stack bottom
  __ movptr(rcx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(rsp, Address(rbp, rcx, Address::times_ptr));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ jmp(rax);

  return start;
}
// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ movptr(rax, Address(rbx, Method::method_data_offset()));
    __ testptr(rax, rax);
    __ jccb(Assembler::zero, no_mdo);
    // Increment counter in the MDO
    const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                              in_bytes(InvocationCounter::counter_offset()));
    const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
    __ increment_mask_and_jump(mdo_invocation_counter, mask, rcx, overflow);
    __ jmp(done);
  }
  __ bind(no_mdo);
  // Increment counter in MethodCounters
  const Address invocation_counter(rax,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  __ get_method_counters(rbx, rax, done);
  const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
  __ increment_mask_and_jump(invocation_counter, mask, rcx, overflow);
  __ bind(done);
}
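// A rough sketch of increment_mask_and_jump (interp_masm holds the
// authoritative code): the counter word is loaded, bumped by
// InvocationCounter::count_increment, stored back, and then and-ed with the
// supplied mask; when the masked result is zero the overflow label is taken.
// The masks (invoke_mask) are derived from the compilation policy's
// invocation thresholds.
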
void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx - method
  // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (null bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or null if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was not
// obvious in generate_fixed_frame), the guard should work for them too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = (int)os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
  __ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}
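// Note: the belowEqual fast path above relies on the guard pages. A frame
// that fits within a single page cannot skip past the yellow/red zones, so
// touching its locals or expression stack is guaranteed to fault inside the
// guard zone if the stack is exhausted; only frames larger than a page need
// the explicit comparison against JavaThread::stack_overflow_limit().
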
// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx, rscratch2);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is null");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size); // add space for a monitor entry
  __ subptr(monitor_block_top, entry_size / wordSize); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}
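// Note: each monitor entry carved out above is a BasicObjectLock (a lock
// word plus the object reference) allocated on the expression stack; the
// monitor block grows downward from interpreter_frame_monitor_block_top,
// which is why both rsp and the (relativized) block-top slot are adjusted
// together.
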
// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);        // save return address
  __ enter();          // save old & set new rbp
  __ push(rbcp);       // set sender sp
  __ push(NULL_WORD);  // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));      // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase
  __ push(rbx);        // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx, rscratch2);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);      // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset()));
  __ push(rdx);        // set constant pool cache

  __ movptr(rax, rlocals);
  __ subptr(rax, rbp);
  __ shrptr(rax, Interpreter::logStackElementSize); // rax = rlocals - fp();
  __ push(rax);        // set relativized rlocals, see frame::interpreter_frame_locals()

  if (native_call) {
    __ push(0);        // no bcp
  } else {
    __ push(rbcp);     // set bcp
  }
  // initialize relativized pointer to expression stack bottom
  __ push(frame::interpreter_frame_initial_sp_offset);
}
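// The fixed frame laid out above, from high to low addresses (a summary of
// the pushes; frame_x86.hpp is the authoritative picture):
//   return address, saved rbp, sender sp, last_sp (null), Method*, mirror,
//   mdp (or 0), constant pool cache, relativized locals pointer,
//   bcp (or 0 for native methods), relativized expression stack bottom.
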
// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load,
  //   and we can use the regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  Label slow_path;
  // rbx: method

  // Check if local 0 != null
  // If the receiver is null then it is OK to jump to the slow path.
  __ movptr(rax, Address(rsp, wordSize));

  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // rax: local 0
  // rbx: method (but can be used as scratch now)
  // rdx: scratch
  // rdi: scratch

  // Preserve the sender sp in case the load barrier
  // calls the runtime
  NOT_LP64(__ push(rsi));

  // Load the value of the referent field.
  const Address field_address(rax, referent_offset);
  __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF);

  // _areturn
  const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
  NOT_LP64(__ pop(rsi));      // get sender sp
  __ pop(rdi);                // get return address
  __ mov(rsp, sender_sp);     // set sp to sender sp
  __ jmp(rdi);
  __ ret(0);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // See more discussion in stackOverflow.hpp.

  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
  const int page_size = (int)os::vm_page_size();
  const int n_shadow_pages = shadow_zone_size / page_size;

  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

#ifdef ASSERT
  Label L_good_limit;
  __ cmpptr(Address(thread, JavaThread::shadow_zone_safe_limit()), NULL_WORD);
  __ jcc(Assembler::notEqual, L_good_limit);
  __ stop("shadow zone safe limit is not initialized");
  __ bind(L_good_limit);

  Label L_good_watermark;
  __ cmpptr(Address(thread, JavaThread::shadow_zone_growth_watermark()), NULL_WORD);
  __ jcc(Assembler::notEqual, L_good_watermark);
  __ stop("shadow zone growth watermark is not initialized");
  __ bind(L_good_watermark);
#endif

  Label L_done;

  __ cmpptr(rsp, Address(thread, JavaThread::shadow_zone_growth_watermark()));
  __ jcc(Assembler::above, L_done);

  for (int p = 1; p <= n_shadow_pages; p++) {
    __ bang_stack_with_offset(p*page_size);
  }

  // Record the new watermark, but only if update is above the safe limit.
  // Otherwise, the next time around the check above would pass the safe limit.
  __ cmpptr(rsp, Address(thread, JavaThread::shadow_zone_safe_limit()));
  __ jccb(Assembler::belowEqual, L_done);
  __ movptr(Address(thread, JavaThread::shadow_zone_growth_watermark()), rsp);

  __ bind(L_done);

#ifndef _LP64
  __ pop(thread);
#endif
}
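// Note: the banging loop above touches one word in each shadow-zone page
// below rsp, so that a later overflow in this frame (or in leaf code that
// performs no check of its own) faults while the guard pages can still
// absorb it. The growth watermark only lets us skip pages that a previous
// call at this stack depth or deeper has already banged.
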
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax);                                       // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push(NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push(NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ lea(rax, Address(rbp, rax, Address::times_ptr));
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter 5");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // runtime upcalls
  if (runtime_upcalls) {
    __ generate_runtime_upcalls_on_method_entry();
  }

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");
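
  // Note: the signature handler copies the Java arguments from the locals
  // area (from() == rlocals) into the native ABI locations on the C stack
  // and argument registers (to() == rsp), and returns the address of the
  // matching result handler in rax, which is stored into the frame below.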
  // The generated handlers do not touch RBX (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method);        // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method, rax);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr(), rscratch1);
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc(), noreg);
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  // For convenience we use the pc we want to resume to in
  // case of preemption on Object.wait.
  Label native_return;
  __ set_last_Java_frame(rsp, rbp, native_return, rscratch1);
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  __ push_cont_fastpath();

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  __ pop_cont_fastpath();

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(rscratch1);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_result_handler_offset)*wordSize),
              float_handler.addr(), noreg);
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_result_handler_offset)*wordSize),
              double_handler.addr(), noreg);
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ membar(Assembler::Membar_mask_bits(
                Assembler::LoadLoad | Assembler::LoadStore |
                Assembler::StoreLoad | Assembler::StoreStore));
  }
#ifndef _LP64
  if (AlwaysRestoreFPU) {
    //  Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64
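
  // Note on the transition protocol: the _thread_in_native_trans store must
  // be visible to the VM thread before this thread reads the safepoint state
  // below, or a safepoint could overlook this thread; hence the
  // StoreLoad-containing membar above (skipped when UseSystemMemoryBarrier
  // lets the VM serialize all threads itself).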
  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    Label slow_path;

    __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

#ifdef _LP64
  if (LockingMode != LM_LEGACY) {
    // Check preemption for Object.wait()
    Label not_preempted;
    __ movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset()));
    __ cmpptr(rscratch1, NULL_WORD);
    __ jccb(Assembler::equal, not_preempted);
    __ movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
    __ jmp(rscratch1);
    __ bind(native_return);
    __ restore_after_resume(true /* is_native */);
    __ bind(not_preempted);
  } else {
    // any pc will do, so just use this one for LM_LEGACY to keep the code together.
    __ bind(native_return);
  }
#endif // _LP64

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset()), NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            StackOverflow::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }
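
  // Note: if this method previously overflowed the stack, the yellow zone
  // guard pages were disabled so the StackOverflowError could unwind;
  // reguard_yellow_pages above re-enables them before we return to Java.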

  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                 InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to null
  __ empty_expression_stack();
  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, bool runtime_upcalls) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // ebx: Method*
  // rbcp: sender sp (set in InterpreterMacroAssembler::prepare_to_jump_from_interpreted / generate_call_stub)
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push(NULL_WORD); // initialize local variables
    __ decrementl(rdx); // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ lea(rax, Address(rbp, rax, Address::times_ptr));
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter 6");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  if (runtime_upcalls) {
    __ generate_runtime_upcalls_on_method_entry();
  }

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
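
  // For context: this machinery runs on behalf of a JVMTI agent that has
  // requested a frame pop. A minimal agent-side sketch (illustrative only,
  // not part of this file; assumes a valid jvmtiEnv* and a suspended
  // target thread):
  //
  //   jvmtiError err = jvmti->PopFrame(thread);
  //   if (err == JVMTI_ERROR_NONE) {
  //     // When the thread resumes, the topmost frame is popped and the
  //     // invoke that created it is re-executed with its (possibly
  //     // mutated) arguments -- which is why they must be preserved.
  //   }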
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(rbx, Address(rbp, rbx, Address::times_ptr));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc(), noreg);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(c_rarg2, Address(rbp, c_rarg2, Address::times_ptr));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc(), rscratch1);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rcx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(rsp, Address(rbp, rcx, Address::times_ptr));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name argument,
    // or null.
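    // (The value in the first local slot, local0, is passed to the runtime;
    // if the runtime reports a MemberName-based linker call, it returns the
    // preserved member name, which is written back into that slot below.)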

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32 bits returns value in rdx, so don't reuse

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
#ifndef _LP64
  fep = __ pc();     // ftos entry point
  __ push(ftos);
  __ jmpb(L);
  dep = __ pc();     // dtos entry point
  __ push(dtos);
  __ jmpb(L);
#else
  fep = __ pc();     // ftos entry point
  __ push_f(xmm0);
  __ jmpb(L);
  dep = __ pc();     // dtos entry point
  __ push_d(xmm0);
  __ jmpb(L);
#endif // _LP64
  lep = __ pc();     // ltos entry point
  __ push_l();
  __ jmpb(L);
  aep = bep = cep = sep = iep = __ pc();    // [abcsi]tos entry point
  __ push_i_or_ptr();
  vep = __ pc();    // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

void TemplateInterpreterGenerator::count_bytecode() {
#ifdef _LP64
  __ incrementq(ExternalAddress((address) &BytecodeCounter::_counter_value), rscratch1);
#else
  Unimplemented();
#endif
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]), rscratch1);
}

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);        // pop return address so expression stack is 'pure'
  __ push(state);     // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);   // make sure return address is not destroyed by pop(state)
  __ pop(state);      // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);   // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0);  // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);    // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx, rscratch1);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
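  //
  // ("tosca" is the top-of-stack cache: the topmost expression-stack value
  // is kept in registers -- rax, rax:rdx for ltos on 32-bit, or xmm0 for
  // ftos/dtos on LP64 -- so each stub must spill exactly the registers
  // implied by its TosState.)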

  assert(Interpreter::trace_code(t->tos_in()) != nullptr,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt,
           rscratch1);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
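
// Usage sketch for the non-product counting/tracing hooks above. These are
// develop flags, so they are generally only accepted by debug builds of
// the VM:
//
//   java -XX:+CountBytecodes ...             // feeds count_bytecode()
//   java -XX:+PrintBytecodeHistogram ...     // feeds histogram_bytecode()
//   java -XX:+PrintBytecodePairHistogram ... // feeds histogram_bytecode_pair()
//   java -XX:+TraceBytecodes ...             // feeds generate_trace_code()
//   java -XX:StopInterpreterAt=<n> ...       // int3 when the global bytecode
//                                            // counter reaches <n>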