1 /* 2 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "interpreter/bytecodeHistogram.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "interpreter/interpreterGenerator.hpp" 30 #include "interpreter/interpreterRuntime.hpp" 31 #include "interpreter/templateTable.hpp" 32 #include "oops/arrayOop.hpp" 33 #include "oops/methodData.hpp" 34 #include "oops/method.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/jvmtiExport.hpp" 37 #include "prims/jvmtiThreadState.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/deoptimization.hpp" 40 #include "runtime/frame.inline.hpp" 41 #include "runtime/sharedRuntime.hpp" 42 #include "runtime/stubRoutines.hpp" 43 #include "runtime/synchronizer.hpp" 44 #include "runtime/timer.hpp" 45 #include "runtime/vframeArray.hpp" 46 #include "utilities/debug.hpp" 47 #include "utilities/macros.hpp" 48 49 #define __ _masm-> 50 51 #ifndef CC_INTERP 52 53 const int method_offset = frame::interpreter_frame_method_offset * wordSize; 54 const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize; 55 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize; 56 57 //----------------------------------------------------------------------------- 58 59 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() { 60 address entry = __ pc(); 61 62 #ifdef ASSERT 63 { 64 Label L; 65 __ lea(rax, Address(rbp, 66 frame::interpreter_frame_monitor_block_top_offset * 67 wordSize)); 68 __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack 69 // grows negative) 70 __ jcc(Assembler::aboveEqual, L); // check if frame is complete 71 __ stop ("interpreter frame not set up"); 72 __ bind(L); 73 } 74 #endif // ASSERT 75 // Restore bcp under the assumption that the current frame is still 76 // interpreted 77 __ restore_bcp(); 78 79 // expression stack must be empty before entering the VM if an 80 // exception happened 81 __ empty_expression_stack(); 82 // throw exception 83 __ call_VM(noreg, 84 CAST_FROM_FN_PTR(address, 85 InterpreterRuntime::throw_StackOverflowError)); 86 return entry; 87 } 88 89 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler( 90 const char* name) { 91 address entry = __ pc(); 92 // expression stack must be empty before entering the VM if an 93 // exception happened 94 __ empty_expression_stack(); 95 // setup parameters 96 // ??? 
convention: expect aberrant index in register ebx 97 __ lea(c_rarg1, ExternalAddress((address)name)); 98 __ call_VM(noreg, 99 CAST_FROM_FN_PTR(address, 100 InterpreterRuntime:: 101 throw_ArrayIndexOutOfBoundsException), 102 c_rarg1, rbx); 103 return entry; 104 } 105 106 address TemplateInterpreterGenerator::generate_ClassCastException_handler() { 107 address entry = __ pc(); 108 109 // object is at TOS 110 __ pop(c_rarg1); 111 112 // expression stack must be empty before entering the VM if an 113 // exception happened 114 __ empty_expression_stack(); 115 116 __ call_VM(noreg, 117 CAST_FROM_FN_PTR(address, 118 InterpreterRuntime:: 119 throw_ClassCastException), 120 c_rarg1); 121 return entry; 122 } 123 124 address TemplateInterpreterGenerator::generate_exception_handler_common( 125 const char* name, const char* message, bool pass_oop) { 126 assert(!pass_oop || message == NULL, "either oop or message but not both"); 127 address entry = __ pc(); 128 if (pass_oop) { 129 // object is at TOS 130 __ pop(c_rarg2); 131 } 132 // expression stack must be empty before entering the VM if an 133 // exception happened 134 __ empty_expression_stack(); 135 // setup parameters 136 __ lea(c_rarg1, ExternalAddress((address)name)); 137 if (pass_oop) { 138 __ call_VM(rax, CAST_FROM_FN_PTR(address, 139 InterpreterRuntime:: 140 create_klass_exception), 141 c_rarg1, c_rarg2); 142 } else { 143 // kind of lame ExternalAddress can't take NULL because 144 // external_word_Relocation will assert. 145 if (message != NULL) { 146 __ lea(c_rarg2, ExternalAddress((address)message)); 147 } else { 148 __ movptr(c_rarg2, NULL_WORD); 149 } 150 __ call_VM(rax, 151 CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), 152 c_rarg1, c_rarg2); 153 } 154 // throw exception 155 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); 156 return entry; 157 } 158 159 160 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) { 161 address entry = __ pc(); 162 // NULL last_sp until next java call 163 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 164 __ dispatch_next(state); 165 return entry; 166 } 167 168 169 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { 170 address entry = __ pc(); 171 172 // Restore stack bottom in case i2c adjusted stack 173 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 174 // and NULL it as marker that esp is now tos until next java call 175 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 176 177 __ restore_bcp(); 178 __ restore_locals(); 179 180 if (state == atos) { 181 Register mdp = rbx; 182 Register tmp = rcx; 183 __ profile_return_type(mdp, rax, tmp); 184 } 185 186 const Register cache = rbx; 187 const Register index = rcx; 188 __ get_cache_and_index_at_bcp(cache, index, 1, index_size); 189 190 const Register flags = cache; 191 __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); 192 __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask); 193 __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale())); 194 __ dispatch_next(state, step); 195 196 return entry; 197 } 198 199 200 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, 201 int step) { 202 address entry = __ pc(); 203 // NULL last_sp until next java call 204 __ movptr(Address(rbp, 
frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 205 __ restore_bcp(); 206 __ restore_locals(); 207 // handle exceptions 208 { 209 Label L; 210 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); 211 __ jcc(Assembler::zero, L); 212 __ call_VM(noreg, 213 CAST_FROM_FN_PTR(address, 214 InterpreterRuntime::throw_pending_exception)); 215 __ should_not_reach_here(); 216 __ bind(L); 217 } 218 __ dispatch_next(state, step); 219 return entry; 220 } 221 222 int AbstractInterpreter::BasicType_as_index(BasicType type) { 223 int i = 0; 224 switch (type) { 225 case T_BOOLEAN: i = 0; break; 226 case T_CHAR : i = 1; break; 227 case T_BYTE : i = 2; break; 228 case T_SHORT : i = 3; break; 229 case T_INT : i = 4; break; 230 case T_LONG : i = 5; break; 231 case T_VOID : i = 6; break; 232 case T_FLOAT : i = 7; break; 233 case T_DOUBLE : i = 8; break; 234 case T_OBJECT : i = 9; break; 235 case T_ARRAY : i = 9; break; 236 default : ShouldNotReachHere(); 237 } 238 assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, 239 "index out of bounds"); 240 return i; 241 } 242 243 244 address TemplateInterpreterGenerator::generate_result_handler_for( 245 BasicType type) { 246 address entry = __ pc(); 247 switch (type) { 248 case T_BOOLEAN: __ c2bool(rax); break; 249 case T_CHAR : __ movzwl(rax, rax); break; 250 case T_BYTE : __ sign_extend_byte(rax); break; 251 case T_SHORT : __ sign_extend_short(rax); break; 252 case T_INT : /* nothing to do */ break; 253 case T_LONG : /* nothing to do */ break; 254 case T_VOID : /* nothing to do */ break; 255 case T_FLOAT : /* nothing to do */ break; 256 case T_DOUBLE : /* nothing to do */ break; 257 case T_OBJECT : 258 // retrieve result from frame 259 __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); 260 // and verify it 261 __ verify_oop(rax); 262 break; 263 default : ShouldNotReachHere(); 264 } 265 __ ret(0); // return from result handler 266 return entry; 267 } 268 269 address TemplateInterpreterGenerator::generate_safept_entry_for( 270 TosState state, 271 address runtime_entry) { 272 address entry = __ pc(); 273 __ push(state); 274 __ call_VM(noreg, runtime_entry); 275 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos)); 276 return entry; 277 } 278 279 280 281 // Helpers for commoning out cases in the various type of method entries. 282 // 283 284 285 // increment invocation count & check for overflow 286 // 287 // Note: checking for negative value instead of overflow 288 // so we have a 'sticky' overflow test 289 // 290 // rbx: method 291 // ecx: invocation counter 292 // 293 void InterpreterGenerator::generate_counter_incr( 294 Label* overflow, 295 Label* profile_method, 296 Label* profile_method_continue) { 297 Label done; 298 // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. 299 if (TieredCompilation) { 300 int increment = InvocationCounter::count_increment; 301 int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; 302 Label no_mdo; 303 if (ProfileInterpreter) { 304 // Are we profiling? 
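  // Conceptual sketch of increment_mask_and_jump, which both the MDO and
  // MethodCounters paths below rely on (the low count_shift bits of the
  // 32-bit counter word are status bits, so the real count lives in the
  // upper bits):
  //
  //   counter += InvocationCounter::count_increment;  // bump the count
  //   if ((counter & mask) == 0) goto *overflow;      // notify the runtime
  //
  // With mask == ((1 << Tier0InvokeNotifyFreqLog) - 1) << count_shift the
  // overflow branch is taken roughly every 2^Tier0InvokeNotifyFreqLog
  // invocations.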
305 __ movptr(rax, Address(rbx, Method::method_data_offset())); 306 __ testptr(rax, rax); 307 __ jccb(Assembler::zero, no_mdo); 308 // Increment counter in the MDO 309 const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + 310 in_bytes(InvocationCounter::counter_offset())); 311 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); 312 __ jmp(done); 313 } 314 __ bind(no_mdo); 315 // Increment counter in MethodCounters 316 const Address invocation_counter(rax, 317 MethodCounters::invocation_counter_offset() + 318 InvocationCounter::counter_offset()); 319 __ get_method_counters(rbx, rax, done); 320 __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, 321 false, Assembler::zero, overflow); 322 __ bind(done); 323 } else { 324 const Address backedge_counter(rax, 325 MethodCounters::backedge_counter_offset() + 326 InvocationCounter::counter_offset()); 327 const Address invocation_counter(rax, 328 MethodCounters::invocation_counter_offset() + 329 InvocationCounter::counter_offset()); 330 331 __ get_method_counters(rbx, rax, done); 332 333 if (ProfileInterpreter) { 334 __ incrementl(Address(rax, 335 MethodCounters::interpreter_invocation_counter_offset())); 336 } 337 // Update standard invocation counters 338 __ movl(rcx, invocation_counter); 339 __ incrementl(rcx, InvocationCounter::count_increment); 340 __ movl(invocation_counter, rcx); // save invocation count 341 342 __ movl(rax, backedge_counter); // load backedge counter 343 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits 344 345 __ addl(rcx, rax); // add both counters 346 347 // profile_method is non-null only for interpreted method so 348 // profile_method != NULL == !native_call 349 350 if (ProfileInterpreter && profile_method != NULL) { 351 // Test to see if we should create a method data oop 352 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit)); 353 __ jcc(Assembler::less, *profile_method_continue); 354 355 // if no method data exists, go to profile_method 356 __ test_method_data_pointer(rax, *profile_method); 357 } 358 359 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); 360 __ jcc(Assembler::aboveEqual, *overflow); 361 __ bind(done); 362 } 363 } 364 365 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { 366 367 // Asm interpreter on entry 368 // r14 - locals 369 // r13 - bcp 370 // rbx - method 371 // edx - cpool --- DOES NOT APPEAR TO BE TRUE 372 // rbp - interpreter frame 373 374 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ] 375 // Everything as it was on entry 376 // rdx is not restored. Doesn't appear to really be set. 377 378 // InterpreterRuntime::frequency_counter_overflow takes two 379 // arguments, the first (thread) is passed by call_VM, the second 380 // indicates if the counter overflow occurs at a backwards branch 381 // (NULL bcp). We pass zero for it. The call returns the address 382 // of the verified entry point for the method or NULL if the 383 // compilation did not complete (either went background or bailed 384 // out). 385 __ movl(c_rarg1, 0); 386 __ call_VM(noreg, 387 CAST_FROM_FN_PTR(address, 388 InterpreterRuntime::frequency_counter_overflow), 389 c_rarg1); 390 391 __ movptr(rbx, Address(rbp, method_offset)); // restore Method* 392 // Preserve invariant that r13/r14 contain bcp/locals of sender frame 393 // and jump to the interpreted entry. 
394 __ jmp(*do_continue, relocInfo::none); 395 } 396 397 // See if we've got enough room on the stack for locals plus overhead. 398 // The expression stack grows down incrementally, so the normal guard 399 // page mechanism will work for that. 400 // 401 // NOTE: Since the additional locals are also always pushed (wasn't 402 // obvious in generate_method_entry) so the guard should work for them 403 // too. 404 // 405 // Args: 406 // rdx: number of additional locals this frame needs (what we must check) 407 // rbx: Method* 408 // 409 // Kills: 410 // rax 411 void InterpreterGenerator::generate_stack_overflow_check(void) { 412 413 // monitor entry size: see picture of stack set 414 // (generate_method_entry) and frame_amd64.hpp 415 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 416 417 // total overhead size: entry_size + (saved rbp through expr stack 418 // bottom). be sure to change this if you add/subtract anything 419 // to/from the overhead area 420 const int overhead_size = 421 -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size; 422 423 const int page_size = os::vm_page_size(); 424 425 Label after_frame_check; 426 427 // see if the frame is greater than one page in size. If so, 428 // then we need to verify there is enough stack space remaining 429 // for the additional locals. 430 __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize); 431 __ jcc(Assembler::belowEqual, after_frame_check); 432 433 // compute rsp as if this were going to be the last frame on 434 // the stack before the red zone 435 436 const Address stack_base(r15_thread, Thread::stack_base_offset()); 437 const Address stack_size(r15_thread, Thread::stack_size_offset()); 438 439 // locals + overhead, in bytes 440 __ mov(rax, rdx); 441 __ shlptr(rax, Interpreter::logStackElementSize); // 2 slots per parameter. 442 __ addptr(rax, overhead_size); 443 444 #ifdef ASSERT 445 Label stack_base_okay, stack_size_okay; 446 // verify that thread stack base is non-zero 447 __ cmpptr(stack_base, (int32_t)NULL_WORD); 448 __ jcc(Assembler::notEqual, stack_base_okay); 449 __ stop("stack base is zero"); 450 __ bind(stack_base_okay); 451 // verify that thread stack size is non-zero 452 __ cmpptr(stack_size, 0); 453 __ jcc(Assembler::notEqual, stack_size_okay); 454 __ stop("stack size is zero"); 455 __ bind(stack_size_okay); 456 #endif 457 458 // Add stack base to locals and subtract stack size 459 __ addptr(rax, stack_base); 460 __ subptr(rax, stack_size); 461 462 // Use the maximum number of pages we might bang. 463 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages : 464 (StackRedPages+StackYellowPages); 465 466 // add in the red and yellow zone sizes 467 __ addptr(rax, max_pages * page_size); 468 469 // check against the current stack bottom 470 __ cmpptr(rsp, rax); 471 __ jcc(Assembler::above, after_frame_check); 472 473 // Restore sender's sp as SP. This is necessary if the sender's 474 // frame is an extended compiled frame (see gen_c2i_adapter()) 475 // and safer anyway in case of JSR292 adaptations. 476 477 __ pop(rax); // return address must be moved if SP is changed 478 __ mov(rsp, r13); 479 __ push(rax); 480 481 // Note: the restored frame is not necessarily interpreted. 482 // Use the shared runtime version of the StackOverflowError. 
483 assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); 484 __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry())); 485 486 // all done with frame size check 487 __ bind(after_frame_check); 488 } 489 490 // Allocate monitor and lock method (asm interpreter) 491 // 492 // Args: 493 // rbx: Method* 494 // r14: locals 495 // 496 // Kills: 497 // rax 498 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs) 499 // rscratch1, rscratch2 (scratch regs) 500 void InterpreterGenerator::lock_method(void) { 501 // synchronize method 502 const Address access_flags(rbx, Method::access_flags_offset()); 503 const Address monitor_block_top( 504 rbp, 505 frame::interpreter_frame_monitor_block_top_offset * wordSize); 506 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 507 508 #ifdef ASSERT 509 { 510 Label L; 511 __ movl(rax, access_flags); 512 __ testl(rax, JVM_ACC_SYNCHRONIZED); 513 __ jcc(Assembler::notZero, L); 514 __ stop("method doesn't need synchronization"); 515 __ bind(L); 516 } 517 #endif // ASSERT 518 519 // get synchronization object 520 { 521 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 522 Label done; 523 __ movl(rax, access_flags); 524 __ testl(rax, JVM_ACC_STATIC); 525 // get receiver (assume this is frequent case) 526 __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0))); 527 __ jcc(Assembler::zero, done); 528 __ movptr(rax, Address(rbx, Method::const_offset())); 529 __ movptr(rax, Address(rax, ConstMethod::constants_offset())); 530 __ movptr(rax, Address(rax, 531 ConstantPool::pool_holder_offset_in_bytes())); 532 __ movptr(rax, Address(rax, mirror_offset)); 533 534 #ifdef ASSERT 535 { 536 Label L; 537 __ testptr(rax, rax); 538 __ jcc(Assembler::notZero, L); 539 __ stop("synchronization object is NULL"); 540 __ bind(L); 541 } 542 #endif // ASSERT 543 544 __ bind(done); 545 } 546 547 // add space for monitor & lock 548 __ subptr(rsp, entry_size); // add space for a monitor entry 549 __ movptr(monitor_block_top, rsp); // set new monitor block top 550 // store object 551 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); 552 __ movptr(c_rarg1, rsp); // object address 553 __ lock_object(c_rarg1); 554 } 555 556 // Generate a fixed interpreter frame. This is identical setup for 557 // interpreted methods and for native methods hence the shared code. 
//
// Args:
//   rax: return address
//   rbx: Method*
//   r14: pointer to locals
//   r13: sender sp
//   rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(r13);            // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(r13, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(r13, Address(r13, ConstMethod::codes_offset()));  // get codebase
  __ push(rbx);            // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);            // set constant pool cache
  __ push(r14);            // set locals pointer
  if (native_call) {
    __ push(0);            // no bcp
  } else {
    __ push(r13);          // set bcp
  }
  __ push(0);              // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved; otherwise drop
// into the vanilla (slow path) entry)
address InterpreterGenerator::generate_accessor_entry(void) {
  // rbx: Method*

  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry_point = __ pc();
  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
    //       thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // rbx: method
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
    // Shift codes right to get the index on the right.
642 // The bytecode fetched looks like <index><0xb4><0x2a> 643 __ shrl(rdx, 2 * BitsPerByte); 644 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); 645 __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes())); 646 647 // rax: local 0 648 // rbx: method 649 // rdx: constant pool cache index 650 // rdi: constant pool cache 651 652 // check if getfield has been resolved and read constant pool cache entry 653 // check the validity of the cache entry by testing whether _indices field 654 // contains Bytecode::_getfield in b1 byte. 655 assert(in_words(ConstantPoolCacheEntry::size()) == 4, 656 "adjust shift below"); 657 __ movl(rcx, 658 Address(rdi, 659 rdx, 660 Address::times_8, 661 ConstantPoolCache::base_offset() + 662 ConstantPoolCacheEntry::indices_offset())); 663 __ shrl(rcx, 2 * BitsPerByte); 664 __ andl(rcx, 0xFF); 665 __ cmpl(rcx, Bytecodes::_getfield); 666 __ jcc(Assembler::notEqual, slow_path); 667 668 // Note: constant pool entry is not valid before bytecode is resolved 669 __ movptr(rcx, 670 Address(rdi, 671 rdx, 672 Address::times_8, 673 ConstantPoolCache::base_offset() + 674 ConstantPoolCacheEntry::f2_offset())); 675 // edx: flags 676 __ movl(rdx, 677 Address(rdi, 678 rdx, 679 Address::times_8, 680 ConstantPoolCache::base_offset() + 681 ConstantPoolCacheEntry::flags_offset())); 682 683 Label notObj, notInt, notByte, notBool, notShort; 684 const Address field_address(rax, rcx, Address::times_1); 685 686 // Need to differentiate between igetfield, agetfield, bgetfield etc. 687 // because they are different sizes. 688 // Use the type from the constant pool cache 689 __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift); 690 // Make sure we don't need to mask edx after the above shift 691 ConstantPoolCacheEntry::verify_tos_state_shift(); 692 693 __ cmpl(rdx, atos); 694 __ jcc(Assembler::notEqual, notObj); 695 // atos 696 __ load_heap_oop(rax, field_address); 697 __ jmp(xreturn_path); 698 699 __ bind(notObj); 700 __ cmpl(rdx, itos); 701 __ jcc(Assembler::notEqual, notInt); 702 // itos 703 __ movl(rax, field_address); 704 __ jmp(xreturn_path); 705 706 __ bind(notInt); 707 __ cmpl(rdx, btos); 708 __ jcc(Assembler::notEqual, notByte); 709 // btos 710 __ load_signed_byte(rax, field_address); 711 __ jmp(xreturn_path); 712 713 __ bind(notByte); 714 __ cmpl(rdx, ztos); 715 __ jcc(Assembler::notEqual, notBool); 716 // ztos 717 __ load_signed_byte(rax, field_address); 718 __ jmp(xreturn_path); 719 720 __ bind(notBool); 721 __ cmpl(rdx, stos); 722 __ jcc(Assembler::notEqual, notShort); 723 // stos 724 __ load_signed_short(rax, field_address); 725 __ jmp(xreturn_path); 726 727 __ bind(notShort); 728 #ifdef ASSERT 729 Label okay; 730 __ cmpl(rdx, ctos); 731 __ jcc(Assembler::equal, okay); 732 __ stop("what type is this?"); 733 __ bind(okay); 734 #endif 735 // ctos 736 __ load_unsigned_short(rax, field_address); 737 738 __ bind(xreturn_path); 739 740 // _ireturn/_areturn 741 __ pop(rdi); 742 __ mov(rsp, r13); 743 __ jmp(rdi); 744 __ ret(0); 745 746 // generate a vanilla interpreter entry as the slow path 747 __ bind(slow_path); 748 (void) generate_normal_entry(false); 749 } else { 750 (void) generate_normal_entry(false); 751 } 752 753 return entry_point; 754 } 755 756 // Method entry for java.lang.ref.Reference.get. 757 address InterpreterGenerator::generate_Reference_get_entry(void) { 758 #if INCLUDE_ALL_GCS 759 // Code: _aload_0, _getfield, _areturn 760 // parameter size = 1 761 // 762 // The code that gets generated by this routine is split into 2 parts: 763 // 1. 
  //      The "intrinsified" code for G1 (or any SATB based GC),
  //   2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  //   * In the G1 code we do not check whether we need to block for
  //     a safepoint. If G1 is enabled then we must execute the specialized
  //     code for Reference.get (except when the Reference object is null)
  //     so that we can log the value in the referent field with an SATB
  //     update buffer.
  //     If the code for the getfield template is modified so that the
  //     G1 pre-barrier code is executed when the current method is
  //     Reference.get() then going through the normal method entry
  //     will be fine.
  //   * The G1 code can, however, check the receiver object (the instance
  //     of java.lang.Reference) and jump to the slow path if null. If the
  //     Reference object is null then we obviously cannot fetch the referent
  //     and so we don't need to call the G1 pre-barrier. Thus we can use the
  //     regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rbx: Method*

  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            r15_thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP must be preserved for slow path, set SP to it on fast path
    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
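    // Summary of the pattern used here: like the accessor and Reference.get
    // entries above, this intrinsic entry is frameless:
    //   - if a safepoint is pending, fall back to the full native entry;
    //   - otherwise compute the result directly, without building a frame;
    //   - then pop the return address, restore the sender sp from r13 and
    //     jump back to the caller.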
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;     // crc
    const Register val = c_rarg0; // source java byte value
    const Register tbl = c_rarg1; // scratch

    // Arguments are reversed on java expression stack
    __ movl(val, Address(rsp,   wordSize)); // byte value
    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC

    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
    __ notl(crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ notl(crc); // ~crc
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP must be preserved for slow path, set SP to it on fast path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.
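    // Layout implied by the loads below (arguments are reversed on the
    // caller's expression stack, one slot per word, the long 'buf' taking
    // two slots):
    //
    //   updateBytes(int crc, byte[] b, int off, int len)
    //     [rsp + 1*wordSize] = len
    //     [rsp + 2*wordSize] = off
    //     [rsp + 3*wordSize] = b
    //     [rsp + 4*wordSize] = crc
    //
    //   updateByteBuffer(int crc, long buf, int off, int len)
    //     [rsp + 1*wordSize] = len
    //     [rsp + 2*wordSize] = off
    //     [rsp + 3*wordSize] = buf   (two slots)
    //     [rsp + 5*wordSize] = crc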
918 919 // Load parameters 920 const Register crc = c_rarg0; // crc 921 const Register buf = c_rarg1; // source java byte array address 922 const Register len = c_rarg2; // length 923 const Register off = len; // offset (never overlaps with 'len') 924 925 // Arguments are reversed on java expression stack 926 // Calculate address of start element 927 if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { 928 __ movptr(buf, Address(rsp, 3*wordSize)); // long buf 929 __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset 930 __ addq(buf, off); // + offset 931 __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC 932 } else { 933 __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array 934 __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size 935 __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset 936 __ addq(buf, off); // + offset 937 __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC 938 } 939 // Can now load 'len' since we're finished with 'off' 940 __ movl(len, Address(rsp, wordSize)); // Length 941 942 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len); 943 // result in rax 944 945 // _areturn 946 __ pop(rdi); // get return address 947 __ mov(rsp, r13); // set sp to sender sp 948 __ jmp(rdi); 949 950 // generate a vanilla native entry as the slow path 951 __ bind(slow_path); 952 953 (void) generate_native_entry(false); 954 955 return entry; 956 } 957 return generate_native_entry(false); 958 } 959 960 // Interpreter stub for calling a native method. (asm interpreter) 961 // This sets up a somewhat different looking stack for calling the 962 // native method than the typical interpreter frame setup. 963 address InterpreterGenerator::generate_native_entry(bool synchronized) { 964 // determine code generation flags 965 bool inc_counter = UseCompiler || CountCompiledCalls; 966 967 // rbx: Method* 968 // r13: sender sp 969 970 address entry_point = __ pc(); 971 972 const Address constMethod (rbx, Method::const_offset()); 973 const Address access_flags (rbx, Method::access_flags_offset()); 974 const Address size_of_parameters(rcx, ConstMethod:: 975 size_of_parameters_offset()); 976 977 978 // get parameter size (always needed) 979 __ movptr(rcx, constMethod); 980 __ load_unsigned_short(rcx, size_of_parameters); 981 982 // native calls don't need the stack size check since they have no 983 // expression stack and the arguments are already on the stack and 984 // we only add a handful of words to the stack 985 986 // rbx: Method* 987 // rcx: size of parameters 988 // r13: sender sp 989 __ pop(rax); // get return address 990 991 // for natives the size of locals is zero 992 993 // compute beginning of parameters (r14) 994 __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize)); 995 996 // add 2 zero-initialized slots for native calls 997 // initialize result_handler slot 998 __ push((int) NULL_WORD); 999 // slot for oop temp 1000 // (static native method holder mirror/jni oop result) 1001 __ push((int) NULL_WORD); 1002 1003 // initialize fixed part of activation frame 1004 generate_fixed_frame(true); 1005 1006 // make sure method is native & not abstract 1007 #ifdef ASSERT 1008 __ movl(rax, access_flags); 1009 { 1010 Label L; 1011 __ testl(rax, JVM_ACC_NATIVE); 1012 __ jcc(Assembler::notZero, L); 1013 __ stop("tried to execute non-native method as native"); 1014 __ bind(L); 1015 } 1016 { 1017 Label L; 1018 __ testl(rax, JVM_ACC_ABSTRACT); 1019 __ jcc(Assembler::zero, L); 1020 __ stop("tried to execute 
abstract method in interpreter"); 1021 __ bind(L); 1022 } 1023 #endif 1024 1025 // Since at this point in the method invocation the exception handler 1026 // would try to exit the monitor of synchronized methods which hasn't 1027 // been entered yet, we set the thread local variable 1028 // _do_not_unlock_if_synchronized to true. The remove_activation will 1029 // check this flag. 1030 1031 const Address do_not_unlock_if_synchronized(r15_thread, 1032 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 1033 __ movbool(do_not_unlock_if_synchronized, true); 1034 1035 // increment invocation count & check for overflow 1036 Label invocation_counter_overflow; 1037 if (inc_counter) { 1038 generate_counter_incr(&invocation_counter_overflow, NULL, NULL); 1039 } 1040 1041 Label continue_after_compile; 1042 __ bind(continue_after_compile); 1043 1044 bang_stack_shadow_pages(true); 1045 1046 // reset the _do_not_unlock_if_synchronized flag 1047 __ movbool(do_not_unlock_if_synchronized, false); 1048 1049 // check for synchronized methods 1050 // Must happen AFTER invocation_counter check and stack overflow check, 1051 // so method is not locked if overflows. 1052 if (synchronized) { 1053 lock_method(); 1054 } else { 1055 // no synchronization necessary 1056 #ifdef ASSERT 1057 { 1058 Label L; 1059 __ movl(rax, access_flags); 1060 __ testl(rax, JVM_ACC_SYNCHRONIZED); 1061 __ jcc(Assembler::zero, L); 1062 __ stop("method needs synchronization"); 1063 __ bind(L); 1064 } 1065 #endif 1066 } 1067 1068 // start execution 1069 #ifdef ASSERT 1070 { 1071 Label L; 1072 const Address monitor_block_top(rbp, 1073 frame::interpreter_frame_monitor_block_top_offset * wordSize); 1074 __ movptr(rax, monitor_block_top); 1075 __ cmpptr(rax, rsp); 1076 __ jcc(Assembler::equal, L); 1077 __ stop("broken stack frame setup in interpreter"); 1078 __ bind(L); 1079 } 1080 #endif 1081 1082 // jvmti support 1083 __ notify_method_entry(); 1084 1085 // work registers 1086 const Register method = rbx; 1087 const Register t = r11; 1088 1089 // allocate space for parameters 1090 __ get_method(method); 1091 __ movptr(t, Address(method, Method::const_offset())); 1092 __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset())); 1093 __ shll(t, Interpreter::logStackElementSize); 1094 1095 __ subptr(rsp, t); 1096 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1097 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) 1098 1099 // get signature handler 1100 { 1101 Label L; 1102 __ movptr(t, Address(method, Method::signature_handler_offset())); 1103 __ testptr(t, t); 1104 __ jcc(Assembler::notZero, L); 1105 __ call_VM(noreg, 1106 CAST_FROM_FN_PTR(address, 1107 InterpreterRuntime::prepare_native_call), 1108 method); 1109 __ get_method(method); 1110 __ movptr(t, Address(method, Method::signature_handler_offset())); 1111 __ bind(L); 1112 } 1113 1114 // call signature handler 1115 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14, 1116 "adjust this code"); 1117 assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp, 1118 "adjust this code"); 1119 assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1, 1120 "adjust this code"); 1121 1122 // The generated handlers do not touch RBX (the method oop). 1123 // However, large signatures cannot be cached and are generated 1124 // each time here. The slow-path generator can do a GC on return, 1125 // so we must reload it after the call. 
1126 __ call(t); 1127 __ get_method(method); // slow path can do a GC, reload RBX 1128 1129 1130 // result handler is in rax 1131 // set result handler 1132 __ movptr(Address(rbp, 1133 (frame::interpreter_frame_result_handler_offset) * wordSize), 1134 rax); 1135 1136 // pass mirror handle if static call 1137 { 1138 Label L; 1139 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 1140 __ movl(t, Address(method, Method::access_flags_offset())); 1141 __ testl(t, JVM_ACC_STATIC); 1142 __ jcc(Assembler::zero, L); 1143 // get mirror 1144 __ movptr(t, Address(method, Method::const_offset())); 1145 __ movptr(t, Address(t, ConstMethod::constants_offset())); 1146 __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes())); 1147 __ movptr(t, Address(t, mirror_offset)); 1148 // copy mirror into activation frame 1149 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), 1150 t); 1151 // pass handle to mirror 1152 __ lea(c_rarg1, 1153 Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); 1154 __ bind(L); 1155 } 1156 1157 // get native function entry point 1158 { 1159 Label L; 1160 __ movptr(rax, Address(method, Method::native_function_offset())); 1161 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); 1162 __ movptr(rscratch2, unsatisfied.addr()); 1163 __ cmpptr(rax, rscratch2); 1164 __ jcc(Assembler::notEqual, L); 1165 __ call_VM(noreg, 1166 CAST_FROM_FN_PTR(address, 1167 InterpreterRuntime::prepare_native_call), 1168 method); 1169 __ get_method(method); 1170 __ movptr(rax, Address(method, Method::native_function_offset())); 1171 __ bind(L); 1172 } 1173 1174 // pass JNIEnv 1175 __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset())); 1176 1177 // It is enough that the pc() points into the right code 1178 // segment. It does not have to be the correct return pc. 1179 __ set_last_Java_frame(rsp, rbp, (address) __ pc()); 1180 1181 // change thread state 1182 #ifdef ASSERT 1183 { 1184 Label L; 1185 __ movl(t, Address(r15_thread, JavaThread::thread_state_offset())); 1186 __ cmpl(t, _thread_in_Java); 1187 __ jcc(Assembler::equal, L); 1188 __ stop("Wrong thread state in native stub"); 1189 __ bind(L); 1190 } 1191 #endif 1192 1193 // Change state to native 1194 1195 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), 1196 _thread_in_native); 1197 1198 // Call the native method. 1199 __ call(rax); 1200 // result potentially in rax or xmm0 1201 1202 // Verify or restore cpu control state after JNI call 1203 __ restore_cpu_control_state_after_jni(); 1204 1205 // NOTE: The order of these pushes is known to frame::interpreter_frame_result 1206 // in order to extract the result of a method call. If the order of these 1207 // pushes change or anything else is added to the stack then the code in 1208 // interpreter_frame_result must also change. 1209 1210 __ push(dtos); 1211 __ push(ltos); 1212 1213 // change thread state 1214 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), 1215 _thread_in_native_trans); 1216 1217 if (os::is_MP()) { 1218 if (UseMembar) { 1219 // Force this write out before the read below 1220 __ membar(Assembler::Membar_mask_bits( 1221 Assembler::LoadLoad | Assembler::LoadStore | 1222 Assembler::StoreLoad | Assembler::StoreStore)); 1223 } else { 1224 // Write serialization page so VM thread can do a pseudo remote membar. 1225 // We use the current thread pointer to calculate a thread specific 1226 // offset to write to within the page. 
      // This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(r15_thread, rscratch2);
    }
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(r15_thread, true);

  // reset handle block
  __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If the result is an oop, unbox it and store it in the frame where the GC
  // will see it and the result handler will pick it up

  {
    Label no_oop;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
    __ resolve_jobject(rax /* value */,
                       r15_thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa();  // XXX only restore smashed registers
    __ reinit_heapbase();

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
1314 __ get_method(method); 1315 1316 // restore r13 to have legal interpreter frame, i.e., bci == 0 <=> 1317 // r13 == code_base() 1318 __ movptr(r13, Address(method, Method::const_offset())); // get ConstMethod* 1319 __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase 1320 // handle exceptions (exception handling will handle unlocking!) 1321 { 1322 Label L; 1323 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); 1324 __ jcc(Assembler::zero, L); 1325 // Note: At some point we may want to unify this with the code 1326 // used in call_VM_base(); i.e., we should use the 1327 // StubRoutines::forward_exception code. For now this doesn't work 1328 // here because the rsp is not correctly set at this point. 1329 __ MacroAssembler::call_VM(noreg, 1330 CAST_FROM_FN_PTR(address, 1331 InterpreterRuntime::throw_pending_exception)); 1332 __ should_not_reach_here(); 1333 __ bind(L); 1334 } 1335 1336 // do unlocking if necessary 1337 { 1338 Label L; 1339 __ movl(t, Address(method, Method::access_flags_offset())); 1340 __ testl(t, JVM_ACC_SYNCHRONIZED); 1341 __ jcc(Assembler::zero, L); 1342 // the code below should be shared with interpreter macro 1343 // assembler implementation 1344 { 1345 Label unlock; 1346 // BasicObjectLock will be first in list, since this is a 1347 // synchronized method. However, need to check that the object 1348 // has not been unlocked by an explicit monitorexit bytecode. 1349 const Address monitor(rbp, 1350 (intptr_t)(frame::interpreter_frame_initial_sp_offset * 1351 wordSize - sizeof(BasicObjectLock))); 1352 1353 // monitor expect in c_rarg1 for slow unlock path 1354 __ lea(c_rarg1, monitor); // address of first monitor 1355 1356 __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); 1357 __ testptr(t, t); 1358 __ jcc(Assembler::notZero, unlock); 1359 1360 // Entry already unlocked, need to throw exception 1361 __ MacroAssembler::call_VM(noreg, 1362 CAST_FROM_FN_PTR(address, 1363 InterpreterRuntime::throw_illegal_monitor_state_exception)); 1364 __ should_not_reach_here(); 1365 1366 __ bind(unlock); 1367 __ unlock_object(c_rarg1); 1368 } 1369 __ bind(L); 1370 } 1371 1372 // jvmti support 1373 // Note: This must happen _after_ handling/throwing any exceptions since 1374 // the exception handler code notifies the runtime of method exits 1375 // too. If this happens before, method entry/exit notifications are 1376 // not properly paired (was bug - gri 11/22/99). 
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rax/xmm0, then call the result handler to
  // convert the value and handle the result

  __ pop(ltos);
  __ pop(dtos);

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();     // remove frame anchor
  __ pop(rdi);    // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // r13: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // r13: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
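  // Worked example of the computation above: with the parameter count in rcx
  // and the total local count in rdx, the subl leaves rdx holding only the
  // locals that still need stack slots, e.g. max_locals == 7 and
  // size_of_parameters == 2 give rdx == 5. That value is what the stack
  // overflow check below compares against the space left in one page (and,
  // for larger frames, against the thread's actual stack limit).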
1441 generate_stack_overflow_check(); 1442 1443 // get return address 1444 __ pop(rax); 1445 1446 // compute beginning of parameters (r14) 1447 __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize)); 1448 1449 // rdx - # of additional locals 1450 // allocate space for locals 1451 // explicitly initialize locals 1452 { 1453 Label exit, loop; 1454 __ testl(rdx, rdx); 1455 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 1456 __ bind(loop); 1457 __ push((int) NULL_WORD); // initialize local variables 1458 __ decrementl(rdx); // until everything initialized 1459 __ jcc(Assembler::greater, loop); 1460 __ bind(exit); 1461 } 1462 1463 // initialize fixed part of activation frame 1464 generate_fixed_frame(false); 1465 1466 // make sure method is not native & not abstract 1467 #ifdef ASSERT 1468 __ movl(rax, access_flags); 1469 { 1470 Label L; 1471 __ testl(rax, JVM_ACC_NATIVE); 1472 __ jcc(Assembler::zero, L); 1473 __ stop("tried to execute native method as non-native"); 1474 __ bind(L); 1475 } 1476 { 1477 Label L; 1478 __ testl(rax, JVM_ACC_ABSTRACT); 1479 __ jcc(Assembler::zero, L); 1480 __ stop("tried to execute abstract method in interpreter"); 1481 __ bind(L); 1482 } 1483 #endif 1484 1485 // Since at this point in the method invocation the exception 1486 // handler would try to exit the monitor of synchronized methods 1487 // which hasn't been entered yet, we set the thread local variable 1488 // _do_not_unlock_if_synchronized to true. The remove_activation 1489 // will check this flag. 1490 1491 const Address do_not_unlock_if_synchronized(r15_thread, 1492 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 1493 __ movbool(do_not_unlock_if_synchronized, true); 1494 1495 __ profile_parameters_type(rax, rcx, rdx); 1496 // increment invocation count & check for overflow 1497 Label invocation_counter_overflow; 1498 Label profile_method; 1499 Label profile_method_continue; 1500 if (inc_counter) { 1501 generate_counter_incr(&invocation_counter_overflow, 1502 &profile_method, 1503 &profile_method_continue); 1504 if (ProfileInterpreter) { 1505 __ bind(profile_method_continue); 1506 } 1507 } 1508 1509 Label continue_after_compile; 1510 __ bind(continue_after_compile); 1511 1512 // check for synchronized interpreted methods 1513 bang_stack_shadow_pages(false); 1514 1515 // reset the _do_not_unlock_if_synchronized flag 1516 __ movbool(do_not_unlock_if_synchronized, false); 1517 1518 // check for synchronized methods 1519 // Must happen AFTER invocation_counter check and stack overflow check, 1520 // so method is not locked if overflows. 
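  // Summary of lock_method() (defined above): for a synchronized method it
  // carves one BasicObjectLock out of the stack
  // (frame::interpreter_frame_monitor_size() words), stores the lock object
  // into its obj slot (the receiver, or the holder class mirror for static
  // methods), and then enters the monitor via lock_object().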
1521 if (synchronized) { 1522 // Allocate monitor and lock method 1523 lock_method(); 1524 } else { 1525 // no synchronization necessary 1526 #ifdef ASSERT 1527 { 1528 Label L; 1529 __ movl(rax, access_flags); 1530 __ testl(rax, JVM_ACC_SYNCHRONIZED); 1531 __ jcc(Assembler::zero, L); 1532 __ stop("method needs synchronization"); 1533 __ bind(L); 1534 } 1535 #endif 1536 } 1537 1538 // start execution 1539 #ifdef ASSERT 1540 { 1541 Label L; 1542 const Address monitor_block_top (rbp, 1543 frame::interpreter_frame_monitor_block_top_offset * wordSize); 1544 __ movptr(rax, monitor_block_top); 1545 __ cmpptr(rax, rsp); 1546 __ jcc(Assembler::equal, L); 1547 __ stop("broken stack frame setup in interpreter"); 1548 __ bind(L); 1549 } 1550 #endif 1551 1552 // jvmti support 1553 __ notify_method_entry(); 1554 1555 __ dispatch_next(vtos); 1556 1557 // invocation counter overflow 1558 if (inc_counter) { 1559 if (ProfileInterpreter) { 1560 // We have decided to profile this method in the interpreter 1561 __ bind(profile_method); 1562 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); 1563 __ set_method_data_pointer_for_bcp(); 1564 __ get_method(rbx); 1565 __ jmp(profile_method_continue); 1566 } 1567 // Handle overflow of counter and compile method 1568 __ bind(invocation_counter_overflow); 1569 generate_counter_overflow(&continue_after_compile); 1570 } 1571 1572 return entry_point; 1573 } 1574 1575 // Entry points 1576 // 1577 // Here we generate the various kind of entries into the interpreter. 1578 // The two main entry type are generic bytecode methods and native 1579 // call method. These both come in synchronized and non-synchronized 1580 // versions but the frame layout they create is very similar. The 1581 // other method entry types are really just special purpose entries 1582 // that are really entry and interpretation all in one. These are for 1583 // trivial methods like accessor, empty, or special math methods. 1584 // 1585 // When control flow reaches any of the entry types for the interpreter 1586 // the following holds -> 1587 // 1588 // Arguments: 1589 // 1590 // rbx: Method* 1591 // 1592 // Stack layout immediately at entry 1593 // 1594 // [ return address ] <--- rsp 1595 // [ parameter n ] 1596 // ... 1597 // [ parameter 1 ] 1598 // [ expression stack ] (caller's java expression stack) 1599 1600 // Assuming that we don't go to one of the trivial specialized entries 1601 // the stack will look like below when we are ready to execute the 1602 // first bytecode (or call the native routine). The register usage 1603 // will be as the template based interpreter expects (see 1604 // interpreter_amd64.hpp). 1605 // 1606 // local variables follow incoming parameters immediately; i.e. 1607 // the return address is moved to the end of the locals). 1608 // 1609 // [ monitor entry ] <--- rsp 1610 // ... 1611 // [ monitor entry ] 1612 // [ expr. stack bottom ] 1613 // [ saved r13 ] 1614 // [ current r14 ] 1615 // [ Method* ] 1616 // [ saved ebp ] <--- rbp 1617 // [ return address ] 1618 // [ local variable m ] 1619 // ... 1620 // [ local variable 1 ] 1621 // [ parameter n ] 1622 // ... 
1623 // [ parameter 1 ] <--- r14 1624 1625 address AbstractInterpreterGenerator::generate_method_entry( 1626 AbstractInterpreter::MethodKind kind) { 1627 // determine code generation flags 1628 bool synchronized = false; 1629 address entry_point = NULL; 1630 InterpreterGenerator* ig_this = (InterpreterGenerator*)this; 1631 1632 switch (kind) { 1633 case Interpreter::zerolocals : break; 1634 case Interpreter::zerolocals_synchronized: synchronized = true; break; 1635 case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break; 1636 case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break; 1637 case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break; 1638 case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break; 1639 case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break; 1640 1641 case Interpreter::java_lang_math_sin : // fall thru 1642 case Interpreter::java_lang_math_cos : // fall thru 1643 case Interpreter::java_lang_math_tan : // fall thru 1644 case Interpreter::java_lang_math_abs : // fall thru 1645 case Interpreter::java_lang_math_log : // fall thru 1646 case Interpreter::java_lang_math_log10 : // fall thru 1647 case Interpreter::java_lang_math_sqrt : // fall thru 1648 case Interpreter::java_lang_math_pow : // fall thru 1649 case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break; 1650 case Interpreter::java_lang_ref_reference_get 1651 : entry_point = ig_this->generate_Reference_get_entry(); break; 1652 case Interpreter::java_util_zip_CRC32_update 1653 : entry_point = ig_this->generate_CRC32_update_entry(); break; 1654 case Interpreter::java_util_zip_CRC32_updateBytes 1655 : // fall thru 1656 case Interpreter::java_util_zip_CRC32_updateByteBuffer 1657 : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break; 1658 default: 1659 fatal(err_msg("unexpected method kind: %d", kind)); 1660 break; 1661 } 1662 1663 if (entry_point) { 1664 return entry_point; 1665 } 1666 1667 return ig_this->generate_normal_entry(synchronized); 1668 } 1669 1670 // These should never be compiled since the interpreter will prefer 1671 // the compiled version to the intrinsic version. 1672 bool AbstractInterpreter::can_be_compiled(methodHandle m) { 1673 switch (method_kind(m)) { 1674 case Interpreter::java_lang_math_sin : // fall thru 1675 case Interpreter::java_lang_math_cos : // fall thru 1676 case Interpreter::java_lang_math_tan : // fall thru 1677 case Interpreter::java_lang_math_abs : // fall thru 1678 case Interpreter::java_lang_math_log : // fall thru 1679 case Interpreter::java_lang_math_log10 : // fall thru 1680 case Interpreter::java_lang_math_sqrt : // fall thru 1681 case Interpreter::java_lang_math_pow : // fall thru 1682 case Interpreter::java_lang_math_exp : 1683 return false; 1684 default: 1685 return true; 1686 } 1687 } 1688 1689 // How much stack a method activation needs in words. 1690 int AbstractInterpreter::size_top_interpreter_activation(Method* method) { 1691 const int entry_size = frame::interpreter_frame_monitor_size(); 1692 1693 // total overhead size: entry_size + (saved rbp thru expr stack 1694 // bottom). 

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rbp thru expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}
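// Worked example (added note with assumed values): on amd64
// Interpreter::stackElementWords is 1, so a method with max_locals == 10
// and max_stack == 6 contributes method_stack == 16 words; the activation
// size returned above is then 16 plus the fixed overhead_size and the
// entry_frame_after_call_words stub allowance, both platform constants
// taken from the x86 frame layout.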

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13 points to call/send
  __ restore_locals();
  __ reinit_heapbase();  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13: exception bcp
  __ verify_oop(rax);
  __ mov(c_rarg1, rax);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // r13: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
    __ subptr(r14, rax);
    __ addptr(r14, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          r15_thread, rax, r14);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
  __ reset_last_Java_frame(r15_thread, true);
  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();    // XXX do we need this?
  __ restore_locals(); // XXX do we need this?
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  if (EnableInvokeDynamic) {
    Label L_done;
    const Register local0 = r14;

    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(rbx, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  __ get_vm_result(rax, r15_thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        r15_thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation
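// Illustrative note (added, describes the intended use of the entry points
// below): the addresses returned through bep..vep become the per-TosState
// dispatch targets for a vtos template. For instance, if the previous
// bytecode left an int in rax (itos) while the next template expects vtos,
// dispatch enters at iep, which pushes rax onto the expression stack and
// falls through to the shared vtos code at vep.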
void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ jmp(L);
  fep = __ pc();  __ push_f();    __ jmp(L);
  dep = __ pc();  __ push_d();    __ jmp(L);
  lep = __ pc();  __ push_l();    __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}


//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);                                   // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
#endif // ! CC_INTERP