/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compiler_globals.hpp"
#include "interp_masm_x86.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/runtimeUpcalls.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

// Implementation of InterpreterMacroAssembler

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  jump(RuntimeAddress(entry));
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  Label update, next, none;

#ifdef _LP64
  assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
#else
  assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
#endif

  interp_verify_oop(obj, atos);

  testptr(obj, obj);
  jccb(Assembler::notZero, update);
  testptr(mdo_addr, TypeEntries::null_seen);
  jccb(Assembler::notZero, next); // null already seen. Nothing to do anymore.
  // atomic update to prevent overwriting Klass* with 0
  lock();
  orptr(mdo_addr, TypeEntries::null_seen);
  jmpb(next);

  bind(update);
  load_klass(obj, obj, rscratch1);
#ifdef _LP64
  mov(rscratch1, obj);
#endif

  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next); // klass seen before, nothing to
                               // do. The unknown bit may have been
                               // set already but no need to check.

  testptr(obj, TypeEntries::type_unknown);
  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

  cmpptr(mdo_addr, 0);
  jccb(Assembler::equal, none);
  cmpptr(mdo_addr, TypeEntries::null_seen);
  jccb(Assembler::equal, none);
#ifdef _LP64
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just set the
  // profiling to this obj's klass
  mov(obj, rscratch1);
  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next);
#endif

  // different than before. Cannot keep accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown);
  jmpb(next);

  bind(none);
  // first time here. Set profile type.
  movptr(mdo_addr, obj);
#ifdef ASSERT
  andptr(obj, TypeEntries::type_klass_mask);
  verify_klass_ptr(obj);
#endif

  bind(next);
}

void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    jcc(Assembler::notEqual, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      addptr(mdp, off_to_args);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
          jcc(Assembler::less, done);
        }
        movptr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list
        subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
        subl(tmp, 1);
        Address arg_addr = argument_address(tmp);
        movptr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        addptr(mdp, to_add);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non null
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        shll(tmp, log2i_exact((int)DataLayout::cell_size));
        addptr(mdp, tmp);
      }
      movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, _bcp_register);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length
      Label do_profile;
      cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
      jcc(Assembler::equal, do_profile);
      cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
      jcc(Assembler::equal, do_profile);
      get_method(tmp);
      cmpw(Address(tmp, Method::intrinsic_id_offset()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
      jcc(Assembler::notEqual, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters
    movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    testl(tmp1, tmp1);
    jcc(Assembler::negative, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameters. Collect profiling from last parameter down.
    // mdo start + parameters offset + array length - 1
    addptr(mdp, tmp1);
    movptr(tmp1, Address(mdp, ArrayData::array_len_offset()));
    decrement(tmp1, TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size);
    Address arg_off(mdp, tmp1, per_arg_scale, off_base);
    Address arg_type(mdp, tmp1, per_arg_scale, type_base);

    // load offset on the stack from the slot for this parameter
    movptr(tmp2, arg_off);
    negptr(tmp2);
    // read the parameter from the local area
    movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale()));

    // profile the parameter
    profile_obj_type(tmp2, arg_type);

    // go to next parameter
    decrement(tmp1, TypeStackSlotEntries::per_arg_count());
    jcc(Assembler::positive, loop);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore bcp & locals registers
  //       since these are callee saved registers and no blocking/
  //       GC can happen in leaf calls.
  // Further Note: DO NOT save/restore bcp/locals. If a caller has
  //       already saved them so that it can use rsi/rdi as temporaries
  //       then a save/restore here will DESTROY the copy the caller
  //       saved! There used to be a save_bcp() that only happened in
  //       the ASSERT path (no restore_bcp). Which caused bizarre failures
  //       when jvm built with ASSERTs.
#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != null");
    bind(L);
  }
#endif
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
  // interpreter specific
  // LP64: Used to ASSERT that r13/r14 were equal to frame's bcp/locals
  // but since they may not have been saved (and we don't want to
  // save them here, see note above) the assert is invalid.
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address entry_point,
                                             int number_of_arguments,
                                             bool check_exceptions) {
  // interpreter specific
  //
  // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  //       really make a difference for these runtime calls, since they are
  //       slow anyway. Btw., bcp must be saved/restored since it may change
  //       due to GC.
  NOT_LP64(assert(java_thread == noreg, "not expecting a precomputed java thread");)
  save_bcp();
#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_base:"
         " last_sp isn't null");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
                               entry_point, number_of_arguments,
                               check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}

#ifdef _LP64
void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
                                                    address entry_point,
                                                    Register arg_1) {
  assert(arg_1 == c_rarg1, "");
  Label resume_pc, not_preempted;

#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
    jcc(Assembler::equal, L);
    stop("Should not have alternate return address set");
    bind(L);
  }
#endif /* ASSERT */

  // Force freeze slow path.
  push_cont_fastpath();

  // Make VM call. In case of preemption set last_pc to the one we want to resume to.
  // Note: call_VM_helper requires last_Java_pc for anchor to be at the top of the stack.
  lea(rscratch1, resume_pc);
  push(rscratch1);
  MacroAssembler::call_VM_helper(oop_result, entry_point, 1, false /*check_exceptions*/);
  pop(rscratch1);

  pop_cont_fastpath();

  // Check if preempted.
  movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset()));
  cmpptr(rscratch1, NULL_WORD);
  jccb(Assembler::zero, not_preempted);
  movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
  jmp(rscratch1);

  // In case of preemption, this is where we will resume once we finally acquire the monitor.
  bind(resume_pc);
  restore_after_resume(false /* is_native */);

  bind(not_preempted);
}

void InterpreterMacroAssembler::restore_after_resume(bool is_native) {
  lea(rscratch1, ExternalAddress(Interpreter::cont_resume_interpreter_adapter()));
  call(rscratch1);
  if (is_native) {
    // On resume we need to set up stack as expected.
    push(dtos);
    push(ltos);
  }
}
#else
void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
                                                    address entry_point,
                                                    Register arg_1) {
  MacroAssembler::call_VM(oop_result, entry_point, arg_1);
}
#endif // _LP64

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed. If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // This method is only called just after the call into the vm in
    // call_VM_base, so the arg registers are available.
    Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit
                        LP64_ONLY(c_rarg0);
    movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
    testl(pop_cond, JavaThread::popframe_pending_bit);
    jcc(Assembler::zero, L);
    testl(pop_cond, JavaThread::popframe_processing_bit);
    jcc(Assembler::notZero, L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    jmp(rax);
    bind(L);
    NOT_LP64(get_thread(java_thread);)
  }
}

void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  NOT_LP64(get_thread(thread);)
  movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
#ifdef _LP64
  switch (state) {
    case atos: movptr(rax, oop_addr);
               movptr(oop_addr, NULL_WORD);
               interp_verify_oop(rax, state);    break;
    case ltos: movptr(rax, val_addr);            break;
    case btos:                                   // fall through
    case ztos:                                   // fall through
    case ctos:                                   // fall through
    case stos:                                   // fall through
    case itos: movl(rax, val_addr);              break;
    case ftos: load_float(val_addr);             break;
    case dtos: load_double(val_addr);            break;
    case vtos: /* nothing to do */               break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  movl(tos_addr, ilgl);
  movl(val_addr, NULL_WORD);
#else
  const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
                               + in_ByteSize(wordSize));
  switch (state) {
    case atos: movptr(rax, oop_addr);
               movptr(oop_addr, NULL_WORD);
               interp_verify_oop(rax, state);    break;
    case ltos:
               movl(rdx, val_addr1);             // fall through
    case btos:                                   // fall through
    case ztos:                                   // fall through
    case ctos:                                   // fall through
    case stos:                                   // fall through
    case itos: movl(rax, val_addr);              break;
    case ftos: load_float(val_addr);             break;
    case dtos: load_double(val_addr);            break;
    case vtos: /* nothing to do */               break;
    default  : ShouldNotReachHere();
  }
#endif // _LP64
  // Clean up tos value in the thread object
  movl(tos_addr, ilgl);
  movptr(val_addr, NULL_WORD);
  NOT_LP64(movptr(val_addr1, NULL_WORD);)
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register tmp = LP64_ONLY(c_rarg0) NOT_LP64(java_thread);
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(java_thread);

    movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    testptr(tmp, tmp);
    jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == nullptr) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset()));
    cmpl(tmp, JvmtiThreadState::earlyret_pending);
    jcc(Assembler::notEqual, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    NOT_LP64(get_thread(java_thread);)
    movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
#ifdef _LP64
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), tmp);
#else
    pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
#endif // _LP64
    jmp(rax);
    bind(L);
    NOT_LP64(get_thread(java_thread);)
  }
}

void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  load_unsigned_short(reg, Address(_bcp_register, bcp_offset));
  bswapl(reg);
  shrl(reg, 16);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(_bcp_register, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    movl(index, Address(_bcp_register, bcp_offset));
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(_bcp_register, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result,
                                                                 Register index,
                                                                 Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  movptr(result, Address(result, ConstantPool::cache_offset()));
  movptr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
  resolve_oop_handle(result, tmp);
  load_heap_oop(result, Address(result, index,
                                UseCompressedOops ? Address::times_4 : Address::times_ptr,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp);
}

// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_index(Register klass,
                                                             Register cpool,
                                                             Register index) {
  assert_different_registers(cpool, index);

  movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
  Register resolved_klasses = cpool;
  movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset()));
  movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//      rax: superklass
//      Rsub_klass: subklass
//
// Kills:
//      rcx, rdi
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != rax, "rax holds superklass");
  LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
  LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
  assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
  assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi

  // Do the check.
  check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
}


#ifndef _LP64
void InterpreterMacroAssembler::f2ieee() {
  if (IEEEPrecision) {
    fstp_s(Address(rsp, 0));
    fld_s(Address(rsp, 0));
  }
}


void InterpreterMacroAssembler::d2ieee() {
  if (IEEEPrecision) {
    fstp_d(Address(rsp, 0));
    fld_d(Address(rsp, 0));
  }
}
#endif // _LP64

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_i(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_i_or_ptr(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

#ifdef _LP64
void InterpreterMacroAssembler::pop_i(Register r) {
  // XXX can't use pop currently, upper half non clean
  movl(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::pop_l(Register r) {
  movq(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_l(Register r) {
  subptr(rsp, 2 * wordSize);
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(0)), r);
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(1)), NULL_WORD);
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr();                 break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: pop_i();                   break;
    case ltos: pop_l();                   break;
    case ftos: pop_f(xmm0);               break;
    case dtos: pop_d(xmm0);               break;
    case vtos: /* nothing to do */        break;
    default:   ShouldNotReachHere();
  }
  interp_verify_oop(rax, state);
}

void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr();                break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: push_i();                  break;
    case ltos: push_l();                  break;
    case ftos: push_f(xmm0);              break;
    case dtos: push_d(xmm0);              break;
    case vtos: /* nothing to do */        break;
    default  : ShouldNotReachHere();
  }
}
#else
void InterpreterMacroAssembler::pop_i(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
  pop(lo);
  pop(hi);
}

void InterpreterMacroAssembler::pop_f() {
  fld_s(Address(rsp, 0));
  addptr(rsp, 1 * wordSize);
}

void InterpreterMacroAssembler::pop_d() {
  fld_d(Address(rsp, 0));
  addptr(rsp, 2 * wordSize);
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(rax);              break;
    case btos:                            // fall through
    case ztos:                            // fall through
    case ctos:                            // fall through
    case stos:                            // fall through
    case itos: pop_i(rax);                break;
    case ltos: pop_l(rax, rdx);           break;
    case ftos:
      if (UseSSE >= 1) {
        pop_f(xmm0);
      } else {
        pop_f();
      }
      break;
    case dtos:
      if (UseSSE >= 2) {
        pop_d(xmm0);
      } else {
        pop_d();
      }
      break;
    case vtos: /* nothing to do */        break;
    default  : ShouldNotReachHere();
  }
  interp_verify_oop(rax, state);
}


void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
  push(hi);
  push(lo);
}

void InterpreterMacroAssembler::push_f() {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 1 * wordSize);
  fstp_s(Address(rsp, 0));
}

void InterpreterMacroAssembler::push_d() {
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr(rax);             break;
    case btos:                            // fall through
    case ztos:                            // fall through
    case ctos:                            // fall through
    case stos:                            // fall through
    case itos: push_i(rax);               break;
    case ltos: push_l(rax, rdx);          break;
    case ftos:
      if (UseSSE >= 1) {
        push_f(xmm0);
      } else {
        push_f();
      }
      break;
    case dtos:
      if (UseSSE >= 2) {
        push_d(xmm0);
      } else {
        push_d();
      }
      break;
    case vtos: /* nothing to do */        break;
    default  : ShouldNotReachHere();
  }
}
#endif // _LP64


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
}


void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  lea(_bcp_register, Address(rsp, wordSize));
  // record last_sp
  mov(rcx, _bcp_register);
  subptr(rcx, rbp);
  sarptr(rcx, LogBytesPerWord);
  movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rcx);
}


// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    // interp_only is an int, on little endian it is sufficient to test the byte only
    // Is a cmpl faster?
    LP64_ONLY(temp = r15_thread;)
    NOT_LP64(get_thread(temp);)
    cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
    jccb(Assembler::zero, run_compiled_code);
    jmp(Address(method, Method::interpreter_entry_offset()));
    bind(run_compiled_code);
  }

  jmp(Address(method, Method::from_interpreted_offset()));
}

// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.  x86 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing x86 specific to be done here
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop,
                                              bool generate_poll) {
  verify_FPU(1, state);
  if (VerifyActivationFrameSize) {
    Label L;
    mov(rcx, rbp);
    subptr(rcx, rsp);
    int32_t min_frame_size =
      (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
      wordSize;
    cmpptr(rcx, min_frame_size);
    jcc(Assembler::greaterEqual, L);
    stop("broken stack frame");
    bind(L);
  }
  if (verifyoop) {
    interp_verify_oop(rax, state);
  }

  address* const safepoint_table = Interpreter::safept_table(state);
#ifdef _LP64
  Label no_safepoint, dispatch;
  if (table != safepoint_table && generate_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());

    jccb(Assembler::zero, no_safepoint);
    lea(rscratch1, ExternalAddress((address)safepoint_table));
    jmpb(dispatch);
  }

  bind(no_safepoint);
  lea(rscratch1, ExternalAddress((address)table));
  bind(dispatch);
  jmp(Address(rscratch1, rbx, Address::times_8));

#else
  Address index(noreg, rbx, Address::times_ptr);
  if (table != safepoint_table && generate_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    Label no_safepoint;
    const Register thread = rcx;
    get_thread(thread);
    testb(Address(thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());

    jccb(Assembler::zero, no_safepoint);
    ArrayAddress dispatch_addr(ExternalAddress((address)safepoint_table), index);
    jump(dispatch_addr, noreg);
    bind(no_safepoint);
  }

  {
    ArrayAddress dispatch_addr(ExternalAddress((address)table), index);
    jump(dispatch_addr, noreg);
  }
#endif // _LP64
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode (load before advancing _bcp_register to prevent AGI)
  load_unsigned_byte(rbx, Address(_bcp_register, step));
  // advance _bcp_register
  increment(_bcp_register, step);
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  load_unsigned_byte(rbx, Address(_bcp_register, 0));
  dispatch_base(state, table);
}

void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movptr(rcx, Address(rcx, Method::const_offset()));
  load_unsigned_byte(rcx, Address(rcx, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  cmpl(rcx, T_INT);
  jcc(Assembler::equal, done);

  // mask integer result to narrower return type.
  cmpl(rcx, T_BOOLEAN);
  jcc(Assembler::notEqual, notBool);
  andl(result, 0x1);
  jmp(done);

  bind(notBool);
  cmpl(rcx, T_BYTE);
  jcc(Assembler::notEqual, notByte);
  LP64_ONLY(movsbl(result, result);)
  NOT_LP64(shll(result, 24);)      // truncate upper 24 bits
  NOT_LP64(sarl(result, 24);)      // and sign-extend byte
  jmp(done);

  bind(notByte);
  cmpl(rcx, T_CHAR);
  jcc(Assembler::notEqual, notChar);
  LP64_ONLY(movzwl(result, result);)
  NOT_LP64(andl(result, 0xFFFF);)  // truncate upper 16 bits
  jmp(done);

  bind(notChar);
  // cmpl(rcx, T_SHORT);  // all that's left
  // jcc(Assembler::notEqual, done);
  LP64_ONLY(movswl(result, result);)
  NOT_LP64(shll(result, 16);)      // truncate upper 16 bits
  NOT_LP64(sarl(result, 16);)      // and sign-extend short

  // Nothing to do for T_INT
  bind(done);
}

// remove activation
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        Register ret_addr,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: Registers rdx xmm0 may be in use for the
  // result check if synchronized method
  Label unlocked, unlock, no_unlock;

  const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  const Register robj    = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  const Register rmon    = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
  // monitor pointers need different register
  // because rdx may have the result in it
  NOT_LP64(get_thread(rthread);)

  // The below poll is for the stack watermark barrier. It allows frames that would
  // normally not be safe to use to be fixed up lazily. Such bad returns into unsafe
  // territory of the stack will call InterpreterRuntime::at_unwind.
  Label slow_path;
  Label fast_path;
  safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
  jmp(fast_path);
  bind(slow_path);
  push(state);
  set_last_Java_frame(rthread, noreg, rbp, (address)pc(), rscratch1);
  super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
  NOT_LP64(get_thread(rthread);) // call_VM clobbered it, restore
  reset_last_Java_frame(rthread, true);
  pop(state);
  bind(fast_path);

  // get the value of _do_not_unlock_if_synchronized into rbx
  const Address do_not_unlock_if_synchronized(rthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  movbool(rbx, do_not_unlock_if_synchronized);
  movbool(do_not_unlock_if_synchronized, false); // reset the flag

  // get method access flags
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movl(rcx, Address(rcx, Method::access_flags_offset()));
  testl(rcx, JVM_ACC_SYNCHRONIZED);
  jcc(Assembler::zero, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  testbool(rbx);
  jcc(Assembler::notZero, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1/rdx so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(robj, monitor); // address of first monitor

  movptr(rax, Address(robj, BasicObjectLock::obj_offset()));
  testptr(rax, rax);
  jcc(Assembler::notZero, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    NOT_LP64(empty_FPU_stack();)  // remove possible return value from FPU-stack, otherwise stack could overflow
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception. Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      NOT_LP64(empty_FPU_stack();)
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    jmp(unlocked);
  }

  bind(unlock);
  unlock_object(robj);
  pop(state);

  // Check for block-structured locking (i.e., that all locked
  // objects have been unlocked)
  bind(unlocked);

  // rax, rdx: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
    const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    movptr(rmon, monitor_block_top); // derelativize pointer
    lea(rmon, Address(rbp, rmon, Address::times_ptr));
    // c_rarg1 points to current entry, starting with top-most entry

    lea(rbx, monitor_block_bot); // points to word before bottom of
                                 // monitor block
    jmp(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      NOT_LP64(empty_FPU_stack();)
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                               throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.

      push(state);
      mov(robj, rmon); // nop if robj and rmon are the same
      unlock_object(robj);
      pop(state);

      if (install_monitor_exception) {
        NOT_LP64(empty_FPU_stack();)
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }

      jmp(restart);
    }

    bind(loop);
    // check if current entry is used
    cmpptr(Address(rmon, BasicObjectLock::obj_offset()), NULL_WORD);
    jcc(Assembler::notEqual, exception);

    addptr(rmon, entry_size); // otherwise advance to next entry
    bind(entry);
    cmpptr(rmon, rbx); // check if bottom reached
    jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender sp
  movptr(rbx,
         Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
  if (StackReservedPages > 0) {
    // testing if reserved zone needs to be re-enabled
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    Label no_reserved_zone_enabling;

    NOT_LP64(get_thread(rthread);)

    // check if already enabled - if so no re-enabling needed
    assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
    cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
    jcc(Assembler::equal, no_reserved_zone_enabling);

    cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    jcc(Assembler::lessEqual, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }
  leave();       // remove frame anchor
  pop(ret_addr); // get return address
  mov(rsp, rbx); // set sp to sender sp
  pop_cont_fastpath();
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}


// Lock object
//
// Args:
//      rdx, c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      rax, rbx
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");

  if (LockingMode == LM_MONITOR) {
    call_VM_preemptable(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label count_locking, done, slow_case;

    const Register swap_reg = rax; // Must use rax for cmpxchg instruction
    const Register tmp_reg = rbx;
    const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
    const Register rklass_decode_tmp = rscratch1;

    const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
    const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    // Load object pointer into obj_reg
    movptr(obj_reg, Address(lock_reg, obj_offset));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
      testb(Address(tmp_reg, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
      jcc(Assembler::notZero, slow_case);
    }

    if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
      const Register thread = r15_thread;
      lightweight_lock(lock_reg, obj_reg, swap_reg, thread, tmp_reg, slow_case);
#else
      // Lacking registers and thread on x86_32. Always take slow path.
      jmp(slow_case);
#endif
    } else if (LockingMode == LM_LEGACY) {
      // Load immediate 1 into swap_reg %rax
      movl(swap_reg, 1);

      // Load (object->mark() | 1) into swap_reg %rax
      orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

      // Save (object->mark() | 1) into BasicLock's displaced header
      movptr(Address(lock_reg, mark_offset), swap_reg);

      assert(lock_offset == 0,
             "displaced header must be first word in BasicObjectLock");

      lock();
      cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      jcc(Assembler::zero, count_locking);

      const int zero_bits = LP64_ONLY(7) NOT_LP64(3);

      // Fast check for recursive lock.
      //
      // Can apply the optimization only if this is a stack lock
      // allocated in this thread. For efficiency, we can focus on
      // recently allocated stack locks (instead of reading the stack
      // base and checking whether 'mark' points inside the current
      // thread stack):
      // 1) (mark & zero_bits) == 0, and
      // 2) rsp <= mark < mark + os::pagesize()
      //
      // Warning: rsp + os::pagesize can overflow the stack base. We must
      // neither apply the optimization for an inflated lock allocated
      // just above the thread stack (this is why condition 1 matters)
      // nor apply the optimization if the stack lock is inside the stack
      // of another thread. The latter is avoided even in case of overflow
      // because we have guard pages at the end of all stacks. Hence, if
      // we go over the stack base and hit the stack of another thread,
      // this should not be in a writeable area that could contain a
      // stack lock allocated by that thread. As a consequence, a stack
      // lock less than page size away from rsp is guaranteed to be
      // owned by the current thread.
      //
      // These 3 tests can be done by evaluating the following
      // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
      // assuming both stack pointer and pagesize have their
      // least significant bits clear.
      // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
      subptr(swap_reg, rsp);
      andptr(swap_reg, zero_bits - (int)os::vm_page_size());

      // Save the test result, for recursive case, the result is zero
      movptr(Address(lock_reg, mark_offset), swap_reg);
      jcc(Assembler::notZero, slow_case);

      bind(count_locking);
      inc_held_monitor_count();
    }
    jmp(done);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM_preemptable(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
    bind(done);
  }
}


// Unlocks an object. Used in monitorexit bytecode and
// remove_activation.  Throws an IllegalMonitorException if object is
// not locked by current thread.
//
// Args:
//      rdx, c_rarg1: BasicObjectLock for lock
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1 (scratch reg)
//      rax, rbx, rcx, rdx
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");

  if (LockingMode == LM_MONITOR) {
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Label count_locking, done, slow_case;

    const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
    const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx);  // Will contain the old oopMark
    const Register obj_reg    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);  // Will contain the oop

    save_bcp(); // Save in case of exception

    if (LockingMode != LM_LIGHTWEIGHT) {
      // Convert from BasicObjectLock structure to object and BasicLock
      // structure. Store the BasicLock address into %rax
      lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
    }

    // Load oop into obj_reg(%c_rarg3)
    movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));

    // Free entry
    movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD);

    if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
      lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case);
#else
      // Lacking registers and thread on x86_32. Always take slow path.
      jmp(slow_case);
#endif
    } else if (LockingMode == LM_LEGACY) {
      // Load the old header from BasicLock structure
      movptr(header_reg, Address(swap_reg,
                                 BasicLock::displaced_header_offset_in_bytes()));

      // Test for recursion
      testptr(header_reg, header_reg);

      // zero for recursive case
      jcc(Assembler::zero, count_locking);

      // Atomic swap back the old header
      lock();
      cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

      // zero for simple unlock of a stack-lock case
      jcc(Assembler::notZero, slow_case);

      bind(count_locking);
      dec_held_monitor_count();
    }
    jmp(done);

    bind(slow_case);
    // Call the runtime routine for slow case.
    movptr(Address(lock_reg, BasicObjectLock::obj_offset()), obj_reg); // restore obj
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);

    restore_bcp();
  }
}

void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
  testptr(mdp, mdp);
  jcc(Assembler::zero, zero_continue);
}


// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;
  push(rax);
  push(rbx);

  get_method(rbx);
  // Test MDO to avoid the call if it is null.
  movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
  testptr(rax, rax);
  jcc(Assembler::zero, set_mdp);
  // rbx: method
  // _bcp_register: bcp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, _bcp_register);
  // rax: mdi
  // mdo is guaranteed to be non-zero here, we checked for it before the call.
  movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
  addptr(rbx, in_bytes(MethodData::data_offset()));
  addptr(rax, rbx);
  bind(set_mdp);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), rax);
  pop(rbx);
  pop(rax);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  push(rax);
  push(rbx);
  Register arg3_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  Register arg2_reg = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
  push(arg3_reg);
  push(arg2_reg);
  test_method_data_pointer(arg3_reg, verify_continue); // If mdp is zero, continue
  get_method(rbx);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
  load_unsigned_short(arg2_reg,
                      Address(arg3_reg, in_bytes(DataLayout::bci_offset())));
  addptr(arg2_reg, Address(rbx, Method::const_offset()));
  lea(arg2_reg, Address(arg2_reg, ConstMethod::codes_offset()));
  cmpptr(arg2_reg, _bcp_register);
  jcc(Assembler::equal, verify_continue);
  // rbx: method
  // _bcp_register: bcp
  // c_rarg3: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               rbx, _bcp_register, arg3_reg);
  bind(verify_continue);
  pop(arg2_reg);
  pop(arg3_reg);
  pop(rbx);
  pop(rax);
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  movptr(data, value);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  // Counter address
  Address data(mdp_in, constant);

  increment_mdp_data_at(data, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64bit counters at best it is wasting space
  // at worst it is a rare bug when counters overflow

  if (decrement) {
    // Decrement the register. Set condition codes.
    addptr(data, -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    Label L;
    jcc(Assembler::negative, L);
    addptr(data, DataLayout::counter_increment);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Increment the register. Set carry flag.
    addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    sbbptr(data, 0);
  }
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  Address data(mdp_in, reg, Address::times_1, constant);

  increment_mdp_data_at(data, decrement);
}

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int header_offset = in_bytes(DataLayout::flags_offset());
  int header_bits = flag_byte_constant;
  // Set the flag
  orb(Address(mdp_in, header_offset), header_bits);
}



void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    cmpptr(value, Address(mdp_in, offset));
  } else {
    // Put the test value into a register, so caller can use it:
    movptr(test_value_out, Address(mdp_in, offset));
    cmpptr(test_value_out, value);
  }
  jcc(Assembler::notEqual, not_equal_continue);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addptr(mdp_in, constant);
  movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(return_bci); // save/restore across call_VM
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  pop(return_bci);
}


void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch. Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    movptr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    addptr(bumped_count, DataLayout::counter_increment);
    sbbptr(bumped_count, 0);
    movptr(data, bumped_count); // Store back out

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The branch was not taken. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      testptr(receiver, receiver);
      jccb(Assembler::notZero, not_null);
      // We are making a call. Increment the count for null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      jmp(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows.  At the same time, it remembers
// the location of the first empty row.  (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree.  Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
    Register receiver, Register mdp,
    Register reg2, int start_row,
    Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
  } else {
    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
                                  &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp, Register reg2, int start_row,
                                                              Label& done, int total_rows,
                                                              OffsetFunction item_offset_fn,
                                                              OffsetFunction item_count_offset_fn) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(mdp, item_offset, item,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the item from the CallData.)

    // The item is item[n].  Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(mdp, count_offset);
    jmp(done);
    bind(next_test);

    if (test_for_null_also) {
      // Failed the equality check on item[n]...  Test for null.
      testptr(reg2, reg2);
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        Label found_null;
        jccb(Assembler::zero, found_null);
        // Item did not match any saved item and there is no empty row for it.
        // Increment total counter to indicate polymorphic case.
        increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
        jmp(done);
        bind(found_null);
        break;
      }
      Label found_null;
      // Since null is rare, make it be the branch-taken case.
      jcc(Assembler::zero, found_null);

      // Put all the "Case 3" tests here.
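      // The recursive call below emits the nested decision tree for rows
      // start_row + 1 .. last_row; it is only reached when row[start_row]
      // holds some other, non-null item, so the recursion depth (and the
      // amount of emitted code) is bounded by total_rows.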
      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
                                    item_offset_fn, item_count_offset_fn);

      // Found a null.  Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed the item[start_row] is null.

  // Fill in the item field and increment the count.
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(mdp, item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  movl(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    jmp(done);
  }
}

// Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != nullptr) {
//     // inner copy of decision tree, rooted at row[1]
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != nullptr) {
//       // degenerate decision tree, rooted at row[2]
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
//   done:

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind(done);
}

void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      jmp(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2, false);
      NOT_LP64(assert(reg2 == rdi, "we know how to fix this blown reg");)
      NOT_LP64(restore_locals();)  // Restore EDI
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count.
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::
                                  default_displacement_offset()));

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) +
    // case_array_offset_in_bytes()
    movl(reg2, in_bytes(MultiBranchData::per_case_size()));
    imulptr(index, reg2); // XXX l ?
    addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?

    // Update the case count.
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
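    // With a register index, update_mdp_by_offset() computes
    //   mdp += *(mdp + index + relative_displacement_offset()),
    // i.e. the new mdp comes from this case's displacement cell, which
    // index already addresses after the scaling above.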
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
  if (state == atos) {
    MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
#ifndef _LP64
  if ((state == ftos && UseSSE < 1) ||
      (state == dtos && UseSSE < 2)) {
    MacroAssembler::verify_FPU(stack_depth);
  }
#endif
}

// Jump if ((*counter_addr += increment) & mask) == 0
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
                                                        Register scratch, Label* where) {
  // This update is actually not atomic and can lose a number of updates
  // under heavy contention, but the alternative of using the (contended)
  // atomic update here penalizes profiling paths too much.
  movl(scratch, counter_addr);
  incrementl(scratch, InvocationCounter::count_increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  if (where != nullptr) {
    jcc(Assembler::zero, *where);
  }
}

void InterpreterMacroAssembler::generate_runtime_upcalls_on_method_entry()
{
  address upcall = RuntimeUpcalls::on_method_entry_upcall_address();
  if (RuntimeUpcalls::does_upcall_need_method_parameter(upcall)) {
    get_method(c_rarg1);
    call_VM(noreg, upcall, c_rarg1);
  } else {
    call_VM(noreg, upcall);
  }
}

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
  // track stack depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  if (DTraceMethodProbes) {
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 rthread, rarg);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, rarg);
  }
}


void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
  // track stack depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
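  // On 64-bit the current JavaThread is kept in r15 (r15_thread); on 32-bit it
  // has to be reloaded into rcx with get_thread() before use, which is why the
  // NOT_LP64(get_thread(rthread);) calls appear before each thread access below.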
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit.  If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // template interpreter will leave the result on the top of the stack.
    push(state);
    NOT_LP64(get_thread(rthread);)
    movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    pop(state);
  }

  if (DTraceMethodProbes) {
    push(state);
    NOT_LP64(get_thread(rthread);)
    get_method(rarg);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 rthread, rarg);
    pop(state);
  }
}

void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
  // Get index out of bytecode pointer
  get_cache_index_at_bcp(index, 1, sizeof(u4));
  // Get address of invokedynamic array
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  movptr(cache, Address(cache, in_bytes(ConstantPoolCache::invokedynamic_entries_offset())));
  if (is_power_of_2(sizeof(ResolvedIndyEntry))) {
    shll(index, log2i_exact(sizeof(ResolvedIndyEntry))); // Scale index by power of 2
  } else {
    imull(index, index, sizeof(ResolvedIndyEntry)); // Scale the index to be the entry index * sizeof(ResolvedIndyEntry)
  }
  lea(cache, Address(cache, index, Address::times_1, Array<ResolvedIndyEntry>::base_offset_in_bytes()));
}

void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
  // Get index out of bytecode pointer
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));

  movptr(cache, Address(cache, ConstantPoolCache::field_entries_offset()));
  // Take the shortcut if the entry size is a power of 2
  if (is_power_of_2(sizeof(ResolvedFieldEntry))) {
    shll(index, log2i_exact(sizeof(ResolvedFieldEntry))); // Scale index by power of 2
  } else {
    imull(index, index, sizeof(ResolvedFieldEntry)); // Scale the index to be the entry index * sizeof(ResolvedFieldEntry)
  }
  lea(cache, Address(cache, index, Address::times_1, Array<ResolvedFieldEntry>::base_offset_in_bytes()));
}

void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
  // Get index out of bytecode pointer
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));

  movptr(cache, Address(cache, ConstantPoolCache::method_entries_offset()));
  imull(index, index, sizeof(ResolvedMethodEntry)); // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
  lea(cache, Address(cache, index, Address::times_1, Array<ResolvedMethodEntry>::base_offset_in_bytes()));
}