/*
 * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interp_masm_arm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

//--------------------------------------------------------------------
// Implementation of InterpreterMacroAssembler


InterpreterMacroAssembler::InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {
}

void InterpreterMacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
#ifdef ASSERT
  // Ensure that last_sp is not filled.
  { Label L;
    ldr(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
    cbz(Rtemp, L);
    stop("InterpreterMacroAssembler::call_VM_helper: last_sp != nullptr");
    bind(L);
  }
#endif // ASSERT

  // Rbcp must be saved/restored since it may change due to GC.
  save_bcp();

  // super call
  MacroAssembler::call_VM_helper(oop_result, entry_point, number_of_arguments, check_exceptions);

  // Restore interpreter specific registers.
  restore_bcp();
  restore_method();
}

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  b(entry);
}

void InterpreterMacroAssembler::check_and_handle_popframe() {
  if (can_pop_frame()) {
    Label L;
    const Register popframe_cond = R2_tmp;

    // Initiate popframe handling only if it is not already being processed. If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
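    // Note: the two bit tests below yield three outcomes: no popframe pending
    // (pending bit clear) - skip; popframe already being processed (processing
    // bit set) - skip; popframe pending and not yet processed - handle it.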
    ldr_s32(popframe_cond, Address(Rthread, JavaThread::popframe_condition_offset()));
    tbz(popframe_cond, exact_log2(JavaThread::popframe_pending_bit), L);
    tbnz(popframe_cond, exact_log2(JavaThread::popframe_processing_bit), L);

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Call indirectly to avoid generation ordering problem.
    jump(R0);

    bind(L);
  }
}


// Blows R2, Rtemp. Sets TOS cached value.
void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  const Register thread_state = R2_tmp;

  ldr(thread_state, Address(Rthread, JavaThread::jvmti_thread_state_offset()));

  const Address tos_addr(thread_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thread_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thread_state, JvmtiThreadState::earlyret_value_offset());
  const Address val_addr_hi(thread_state, JvmtiThreadState::earlyret_value_offset()
                                          + in_ByteSize(wordSize));

  Register zero = zero_register(Rtemp);

  switch (state) {
    case atos: ldr(R0_tos, oop_addr);
               str(zero, oop_addr);
               interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
               break;

    case ltos: ldr(R1_tos_hi, val_addr_hi);   // fall through
    case btos:                                // fall through
    case ztos:                                // fall through
    case ctos:                                // fall through
    case stos:                                // fall through
    case itos: ldr_s32(R0_tos, val_addr);     break;
#ifdef __SOFTFP__
    case dtos: ldr(R1_tos_hi, val_addr_hi);   // fall through
    case ftos: ldr(R0_tos, val_addr);         break;
#else
    case ftos: ldr_float (S0_tos, val_addr);  break;
    case dtos: ldr_double(D0_tos, val_addr);  break;
#endif // __SOFTFP__
    case vtos: /* nothing to do */            break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  str(zero, val_addr);
  str(zero, val_addr_hi);

  mov(Rtemp, (int) ilgl);
  str_32(Rtemp, tos_addr);
}


// Blows R2, Rtemp.
void InterpreterMacroAssembler::check_and_handle_earlyret() {
  if (can_force_early_return()) {
    Label L;
    const Register thread_state = R2_tmp;

    ldr(thread_state, Address(Rthread, JavaThread::jvmti_thread_state_offset()));
    cbz(thread_state, L); // if (thread->jvmti_thread_state() == nullptr) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.

    ldr_s32(Rtemp, Address(thread_state, JvmtiThreadState::earlyret_state_offset()));
    cmp(Rtemp, JvmtiThreadState::earlyret_pending);
    b(L, ne);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.

    ldr_s32(R0, Address(thread_state, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), R0);

    jump(R0);

    bind(L);
  }
}


// Sets reg. Blows Rtemp.
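// The operand bytes are assembled in bytecode (big-endian) order:
//   reg = (bcp[bcp_offset] << 8) | bcp[bcp_offset + 1]
// Byte loads are used because Rbcp + bcp_offset may not be halfword-aligned.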
void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  assert(reg != Rtemp, "should be different registers");

  ldrb(Rtemp, Address(Rbcp, bcp_offset));
  ldrb(reg, Address(Rbcp, bcp_offset+1));
  orr(reg, reg, AsmOperand(Rtemp, lsl, BitsPerByte));
}

void InterpreterMacroAssembler::get_index_at_bcp(Register index, int bcp_offset, Register tmp_reg, size_t index_size) {
  assert_different_registers(index, tmp_reg);
  if (index_size == sizeof(u2)) {
    // load bytes of index separately to avoid unaligned access
    ldrb(index, Address(Rbcp, bcp_offset+1));
    ldrb(tmp_reg, Address(Rbcp, bcp_offset));
    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
  } else if (index_size == sizeof(u4)) {
    ldrb(index, Address(Rbcp, bcp_offset+3));
    ldrb(tmp_reg, Address(Rbcp, bcp_offset+2));
    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
    ldrb(tmp_reg, Address(Rbcp, bcp_offset+1));
    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
    ldrb(tmp_reg, Address(Rbcp, bcp_offset));
    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
  } else if (index_size == sizeof(u1)) {
    ldrb(index, Address(Rbcp, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  get_constant_pool(result);

  Register cache = result;
  // load pointer for resolved_references[] objArray
  ldr(cache, Address(result, ConstantPool::cache_offset()));
  ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset()));
  resolve_oop_handle(cache);

  // Add in the index
  // convert from field index to resolved_references() index and from
  // word index to byte offset.
  // Since this is a Java object, it can be compressed.
  logical_shift_left(index, index, LogBytesPerHeapOop);
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  load_heap_oop(result, Address(cache, index));
}

void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                           Register Rcpool, Register Rindex, Register Rklass) {
  add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
  ldrh(Rtemp, Address(Rtemp, sizeof(ConstantPool))); // Rtemp = resolved_klass_index
  ldr(Rklass, Address(Rcpool, ConstantPool::resolved_klasses_offset())); // Rklass = cpool->_resolved_klasses
  add(Rklass, Rklass, AsmOperand(Rtemp, lsl, LogBytesPerWord));
  ldr(Rklass, Address(Rklass, Array<Klass*>::base_offset_in_bytes()));
}

void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
  // Get index out of bytecode pointer, get_cache_entry_pointer_at_bcp
  assert_different_registers(cache, index, Rtemp);

  get_index_at_bcp(index, 1, Rtemp, sizeof(u4));

  // load constant pool cache pointer
  ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));

  // Get address of invokedynamic array
  ldr(cache, Address(cache, in_bytes(ConstantPoolCache::invokedynamic_entries_offset())));

  // Scale the index to be the entry index * sizeof(ResolvedIndyEntry).
  // On ARM32 sizeof(ResolvedIndyEntry) is 12, so use mul instead of lsl.
  mov(Rtemp, sizeof(ResolvedIndyEntry));
  mul(index, index, Rtemp);

  add(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
  add(cache, cache, index);
}

void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
  // Get index out of bytecode pointer
  assert_different_registers(cache, index);

  get_index_at_bcp(index, bcp_offset, cache /*as tmp*/, sizeof(u2));

  // Scale the index to be the entry index * sizeof(ResolvedFieldEntry).
  // sizeof(ResolvedFieldEntry) is 16 on ARM, so a shift can be used.
  if (is_power_of_2(sizeof(ResolvedFieldEntry))) {
    // load constant pool cache pointer
    ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));
    // Get address of field entries array
    ldr(cache, Address(cache, in_bytes(ConstantPoolCache::field_entries_offset())));

    add(cache, cache, Array<ResolvedFieldEntry>::base_offset_in_bytes());
    add(cache, cache, AsmOperand(index, lsl, log2i_exact(sizeof(ResolvedFieldEntry))));
  } else {
    mov(cache, sizeof(ResolvedFieldEntry));
    mul(index, index, cache);
    // load constant pool cache pointer
    ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));

    // Get address of field entries array
    ldr(cache, Address(cache, in_bytes(ConstantPoolCache::field_entries_offset())));
    add(cache, cache, Array<ResolvedFieldEntry>::base_offset_in_bytes());
    add(cache, cache, index);
  }
}

void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
  assert_different_registers(cache, index);

  // Get index out of bytecode pointer
  get_index_at_bcp(index, bcp_offset, cache /* as tmp */, sizeof(u2));

  // sizeof(ResolvedMethodEntry) is not a power of 2 on ARM, so a shift can't be used.
  mov(cache, sizeof(ResolvedMethodEntry));
  mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)

  // load constant pool cache pointer
  ldr(cache,
      Address(FP, frame::interpreter_frame_cache_offset * wordSize));
  // Get address of method entries array
  ldr(cache, Address(cache, in_bytes(ConstantPoolCache::method_entries_offset())));
  add(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
  add(cache, cache, index);
}

// Generate a subtype check: branch to not_subtype if sub_klass is
// not a subtype of super_klass.
// Profiling code for the subtype check failure (profile_typecheck_failed)
// should be explicitly generated by the caller in the not_subtype case.
// Blows Rtemp, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Label& not_subtype,
                                                  Register tmp1,
                                                  Register tmp2) {

  assert_different_registers(Rsub_klass, Rsuper_klass, tmp1, tmp2, Rtemp);
  Label ok_is_subtype, loop, update_cache;

  const Register super_check_offset = tmp1;
  const Register cached_super = tmp2;

  // Profile the not-null value's klass.
  profile_typecheck(tmp1, Rsub_klass);

  // Load the super-klass's check offset into super_check_offset
  ldr_u32(super_check_offset, Address(Rsuper_klass, Klass::super_check_offset_offset()));

  // Check for self
  cmp(Rsub_klass, Rsuper_klass);

  // Load from the sub-klass's super-class display list, or a 1-word cache of
  // the secondary superclass list, or a failing value with a sentinel offset
  // if the super-klass is an interface or exceptionally deep in the Java
  // hierarchy and we have to scan the secondary superclass list the hard way.
  // See if we get an immediate positive hit
  ldr(cached_super, Address(Rsub_klass, super_check_offset));

  cond_cmp(Rsuper_klass, cached_super, ne);
  b(ok_is_subtype, eq);

  // Check for immediate negative hit
  cmp(super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
  b(not_subtype, ne);

  // Now do a linear scan of the secondary super-klass chain.
  const Register supers_arr = tmp1;
  const Register supers_cnt = tmp2;
  const Register cur_super  = Rtemp;

  // Load objArrayOop of secondary supers.
  ldr(supers_arr, Address(Rsub_klass, Klass::secondary_supers_offset()));

  ldr_u32(supers_cnt, Address(supers_arr, Array<Klass*>::length_offset_in_bytes())); // Load the array length
  cmp(supers_cnt, 0);

  // Skip to the start of array elements and prefetch the first super-klass.
  ldr(cur_super, Address(supers_arr, Array<Klass*>::base_offset_in_bytes(), pre_indexed), ne);
  b(not_subtype, eq);

  bind(loop);

  cmp(cur_super, Rsuper_klass);
  b(update_cache, eq);

  subs(supers_cnt, supers_cnt, 1);

  ldr(cur_super, Address(supers_arr, wordSize, pre_indexed), ne);

  b(loop, ne);

  b(not_subtype);

  bind(update_cache);
  // Must be equal but missed in cache. Update cache.
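  // (Storing the super-klass here makes the next check of this sub/super pair
  // hit in the 1-word secondary super cache probed above.)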
  str(Rsuper_klass, Address(Rsub_klass, Klass::secondary_super_cache_offset()));

  bind(ok_is_subtype);
}


//////////////////////////////////////////////////////////////////////////////////


// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  assert(r != Rstack_top, "unpredictable instruction");
  ldr(r, Address(Rstack_top, wordSize, post_indexed));
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert(r != Rstack_top, "unpredictable instruction");
  ldr_s32(r, Address(Rstack_top, wordSize, post_indexed));
  zap_high_non_significant_bits(r);
}

void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
  assert_different_registers(lo, hi);
  assert(lo < hi, "lo must be < hi");
  pop(RegisterSet(lo) | RegisterSet(hi));
}

void InterpreterMacroAssembler::pop_f(FloatRegister fd) {
  fpops(fd);
}

void InterpreterMacroAssembler::pop_d(FloatRegister fd) {
  fpopd(fd);
}


// Transition vtos -> state. Blows R0, R1. Sets TOS cached value.
void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(R0_tos);              break;
    case btos:                               // fall through
    case ztos:                               // fall through
    case ctos:                               // fall through
    case stos:                               // fall through
    case itos: pop_i(R0_tos);                break;
    case ltos: pop_l(R0_tos_lo, R1_tos_hi);  break;
#ifdef __SOFTFP__
    case ftos: pop_i(R0_tos);                break;
    case dtos: pop_l(R0_tos_lo, R1_tos_hi);  break;
#else
    case ftos: pop_f(S0_tos);                break;
    case dtos: pop_d(D0_tos);                break;
#endif // __SOFTFP__
    case vtos: /* nothing to do */           break;
    default  : ShouldNotReachHere();
  }
  interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert(r != Rstack_top, "unpredictable instruction");
  str(r, Address(Rstack_top, -wordSize, pre_indexed));
  check_stack_top_on_expansion();
}

void InterpreterMacroAssembler::push_i(Register r) {
  assert(r != Rstack_top, "unpredictable instruction");
  str_32(r, Address(Rstack_top, -wordSize, pre_indexed));
  check_stack_top_on_expansion();
}

void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
  assert_different_registers(lo, hi);
  assert(lo < hi, "lo must be < hi");
  push(RegisterSet(lo) | RegisterSet(hi));
}

void InterpreterMacroAssembler::push_f() {
  fpushs(S0_tos);
}

void InterpreterMacroAssembler::push_d() {
  fpushd(D0_tos);
}

// Transition state -> vtos. Blows Rtemp.
void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
  switch (state) {
    case atos: push_ptr(R0_tos);              break;
    case btos:                                // fall through
    case ztos:                                // fall through
    case ctos:                                // fall through
    case stos:                                // fall through
    case itos: push_i(R0_tos);                break;
    case ltos: push_l(R0_tos_lo, R1_tos_hi);  break;
#ifdef __SOFTFP__
    case ftos: push_i(R0_tos);                break;
    case dtos: push_l(R0_tos_lo, R1_tos_hi);  break;
#else
    case ftos: push_f();                      break;
    case dtos: push_d();                      break;
#endif // __SOFTFP__
    case vtos: /* nothing to do */            break;
    default  : ShouldNotReachHere();
  }
}


// Converts return value in R0/R1 (interpreter calling conventions) to TOS cached value.
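// Note: this conversion (and the reverse one below) only emits code for the
// softfp ABI with VFP registers available (neither __SOFTFP__ nor __ABI_HARD__
// defined): under __SOFTFP__ the ftos/dtos TOS cache already lives in R0/R1,
// and under __ABI_HARD__ float results are presumably returned in VFP
// registers directly, so no move is needed in either of those cases.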
void InterpreterMacroAssembler::convert_retval_to_tos(TosState state) {
#if (!defined __SOFTFP__ && !defined __ABI_HARD__)
  // According to interpreter calling conventions, result is returned in R0/R1,
  // but templates expect ftos in S0, and dtos in D0.
  if (state == ftos) {
    fmsr(S0_tos, R0);
  } else if (state == dtos) {
    fmdrr(D0_tos, R0, R1);
  }
#endif // !__SOFTFP__ && !__ABI_HARD__
}

// Converts TOS cached value to return value in R0/R1 (according to interpreter calling conventions).
void InterpreterMacroAssembler::convert_tos_to_retval(TosState state) {
#if (!defined __SOFTFP__ && !defined __ABI_HARD__)
  // According to interpreter calling conventions, result is returned in R0/R1,
  // so ftos (S0) and dtos (D0) are moved to R0/R1.
  if (state == ftos) {
    fmrs(R0, S0_tos);
  } else if (state == dtos) {
    fmrrd(R0, R1, D0_tos);
  }
#endif // !__SOFTFP__ && !__ABI_HARD__
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ldr(val, Address(Rstack_top, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  str(val, Address(Rstack_top, Interpreter::expr_offset_in_bytes(n)));
}


void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  mov(Rsender_sp, SP);

  // record last_sp
  str(Rsender_sp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
}

// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
  assert_different_registers(method, Rtemp);

  prepare_to_jump_from_interpreted();

  if (can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.

    ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
    cmp(Rtemp, 0);
    ldr(PC, Address(method, Method::interpreter_entry_offset()), ne);
  }

  indirect_jump(Address(method, Method::from_interpreted_offset()), Rtemp);
}


void InterpreterMacroAssembler::restore_dispatch() {
  mov_slow(RdispatchTable, (address)Interpreter::dispatch_table(vtos));
}


// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing ARM-specific to be done here.
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              DispatchTableMode table_mode,
                                              bool verifyoop, bool generate_poll) {
  if (VerifyActivationFrameSize) {
    Label L;
    sub(Rtemp, FP, SP);
    int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize;
    cmp(Rtemp, min_frame_size);
    b(L, ge);
    stop("broken stack frame");
    bind(L);
  }

  if (verifyoop) {
    interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
  }

  Label safepoint;
  address* const safepoint_table = Interpreter::safept_table(state);
  address* const table = Interpreter::dispatch_table(state);
  bool needs_thread_local_poll = generate_poll && table != safepoint_table;

  if (needs_thread_local_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    ldr(Rtemp, Address(Rthread, JavaThread::polling_word_offset()));
    tbnz(Rtemp, exact_log2(SafepointMechanism::poll_bit()), safepoint);
  }

  if ((state == itos) || (state == btos) || (state == ztos) || (state == ctos) || (state == stos)) {
    zap_high_non_significant_bits(R0_tos);
  }

#ifdef ASSERT
  Label L;
  mov_slow(Rtemp, (address)Interpreter::dispatch_table(vtos));
  cmp(Rtemp, RdispatchTable);
  b(L, eq);
  stop("invalid RdispatchTable");
  bind(L);
#endif

  if (table_mode == DispatchDefault) {
    if (state == vtos) {
      indirect_jump(Address::indexed_ptr(RdispatchTable, R3_bytecode), Rtemp);
    } else {
      // on 32-bit ARM this method is faster than the one above.
      sub(Rtemp, RdispatchTable, (Interpreter::distance_from_dispatch_table(vtos) -
                                  Interpreter::distance_from_dispatch_table(state)) * wordSize);
      indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
    }
  } else {
    assert(table_mode == DispatchNormal, "invalid dispatch table mode");
    address table = (address) Interpreter::normal_table(state);
    mov_slow(Rtemp, table);
    indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
  }

  if (needs_thread_local_poll) {
    bind(safepoint);
    lea(Rtemp, ExternalAddress((address)safepoint_table));
    indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
  }

  nop(); // to avoid filling CPU pipeline with invalid instructions
  nop();
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, DispatchDefault, true, generate_poll);
}


void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, DispatchNormal);
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, DispatchNormal, false);
}

void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode and advance Rbcp
  ldrb(R3_bytecode, Address(Rbcp, step, pre_indexed));
  dispatch_base(state, DispatchDefault, true, generate_poll);
}

void InterpreterMacroAssembler::narrow(Register result) {
  // mask integer result to narrower return type.
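  // For example, a callee declared to return boolean may leave garbage in the
  // upper bits of R0; the T_BOOLEAN case below masks the value to 0 or 1,
  // while byte/short results are sign-extended and char results zero-extended.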
  const Register Rtmp = R2;

  // get method type
  ldr(Rtmp, Address(Rmethod, Method::const_offset()));
  ldrb(Rtmp, Address(Rtmp, ConstMethod::result_type_offset()));

  Label notBool, notByte, notChar, done;
  cmp(Rtmp, T_INT);
  b(done, eq);

  cmp(Rtmp, T_BOOLEAN);
  b(notBool, ne);
  and_32(result, result, 1);
  b(done);

  bind(notBool);
  cmp(Rtmp, T_BYTE);
  b(notByte, ne);
  sign_extend(result, result, 8);
  b(done);

  bind(notByte);
  cmp(Rtmp, T_CHAR);
  b(notChar, ne);
  zero_extend(result, result, 16);
  b(done);

  bind(notChar);
  // cmp(Rtmp, T_SHORT);
  // b(done, ne);
  sign_extend(result, result, 16);

  // Nothing to do
  bind(done);
}

// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//   If throw_monitor_exception
//     throws IllegalMonitorStateException
//   Else if install_monitor_exception
//     installs IllegalMonitorStateException
//   Else
//     no error processing
void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_addr,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception,
                                                  bool notify_jvmdi) {
  Label unlock, unlocked, no_unlock;

  // Note: Registers R0, R1, S0 and D0 (TOS cached value) may be in use for the result.

  const Address do_not_unlock_if_synchronized(Rthread,
                                              JavaThread::do_not_unlock_if_synchronized_offset());

  const Register Rflag = R2;
  const Register Raccess_flags = R3;

  restore_method();

  ldrb(Rflag, do_not_unlock_if_synchronized);

  // get method access flags
  ldr_u32(Raccess_flags, Address(Rmethod, Method::access_flags_offset()));

  strb(zero_register(Rtemp), do_not_unlock_if_synchronized); // reset the flag

  // check if method is synchronized

  tbz(Raccess_flags, JVM_ACC_SYNCHRONIZED_BIT, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag is set.
  cbnz(Rflag, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  const Register Rmonitor = R0; // fixed in unlock_object()
  const Register Robj = R2;

  // address of first monitor
  sub(Rmonitor, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize + (int)sizeof(BasicObjectLock));

  ldr(Robj, Address(Rmonitor, BasicObjectLock::obj_offset()));
  cbnz(Robj, unlock);

  pop(state);

  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    b(unlocked);
  }


  // Exception case for the check that all monitors are unlocked.
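  // (Reached from the monitor scan loop below when an entry's obj field is
  // still non-null, i.e. the monitor was never unlocked.)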
  const Register Rcur = R2;
  Label restart_check_monitors_unlocked, exception_monitor_is_still_locked;

  bind(exception_monitor_is_still_locked);
  // Monitor entry is still locked, need to throw exception.
  // Rcur: monitor entry.

  if (throw_monitor_exception) {
    // Throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Stack unrolling. Unlock object and install illegal_monitor_exception.
    // Unlock does not block, so don't have to worry about the frame.

    push(state);
    mov(Rmonitor, Rcur);
    unlock_object(Rmonitor);

    if (install_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }

    pop(state);
    b(restart_check_monitors_unlocked);
  }

  bind(unlock);
  unlock_object(Rmonitor);
  pop(state);

  // Check for block-structured locking (i.e., that all locked objects have been unlocked).
  bind(unlocked);

  // Check that all monitors are unlocked
  {
    Label loop;

    const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
    const Register Rbottom = R3;
    const Register Rcur_obj = Rtemp;

    bind(restart_check_monitors_unlocked);

    ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                 // points to current entry, starting with top-most entry
    sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
                                 // points to word before bottom of monitor block

    cmp(Rcur, Rbottom);          // check if there are no monitors
    ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
                                 // prefetch monitor's object
    b(no_unlock, eq);

    bind(loop);
    // check if current entry is used
    cbnz(Rcur_obj, exception_monitor_is_still_locked);

    add(Rcur, Rcur, entry_size); // otherwise advance to next entry
    cmp(Rcur, Rbottom);          // check if bottom reached
    ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
                                 // prefetch monitor's object
    b(loop, ne);                 // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  mov(Rtemp, FP);
  ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));

  if (ret_addr != LR) {
    mov(ret_addr, LR);
  }
}


// At certain points in the method invocation the monitor of
// synchronized methods hasn't been entered yet.
// To correctly handle exceptions at these points, we set the thread local
// variable _do_not_unlock_if_synchronized to true. The remove_activation will
// check this flag.
void InterpreterMacroAssembler::set_do_not_unlock_if_synchronized(bool flag, Register tmp) {
  const Address do_not_unlock_if_synchronized(Rthread,
                                              JavaThread::do_not_unlock_if_synchronized_offset());
  if (flag) {
    mov(tmp, 1);
    strb(tmp, do_not_unlock_if_synchronized);
  } else {
    strb(zero_register(tmp), do_not_unlock_if_synchronized);
  }
}

// Lock object
//
// Argument: R1 : Points to BasicObjectLock to be used for locking.
//           Must be initialized with object to lock.
// Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
void InterpreterMacroAssembler::lock_object(Register Rlock) {
  assert(Rlock == R1, "the second argument");

  if (LockingMode == LM_MONITOR) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
  } else {
    Label done;

    const Register Robj = R2;
    const Register Rmark = R3;
    assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);

    const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
    const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
    const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();

    Label already_locked, slow_case;

    // Load object pointer
    ldr(Robj, Address(Rlock, obj_offset));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(R0, Robj);
      ldr_u32(R0, Address(R0, Klass::access_flags_offset()));
      tst(R0, JVM_ACC_IS_VALUE_BASED_CLASS);
      b(slow_case, ne);
    }

    if (LockingMode == LM_LIGHTWEIGHT) {
      lightweight_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
      b(done);
    } else if (LockingMode == LM_LEGACY) {
      // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
      // That would be acceptable as either the CAS or the slow case path is taken in that case.
      // An exception is if the object is locked by the calling thread: then the recursive test will pass (guaranteed, as
      // loads are satisfied from a store queue if performed on the same processor).

      assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
      ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));

      // Test if object is already locked
      tst(Rmark, markWord::unlocked_value);
      b(already_locked, eq);

      // Save old object->mark() into BasicLock's displaced header
      str(Rmark, Address(Rlock, mark_offset));

      cas_for_lock_acquire(Rmark, Rlock, Robj, Rtemp, slow_case);

      b(done);

      // If we got here that means the object is locked by either the calling thread or another thread.
      bind(already_locked);
      // Handling of locked objects: recursive locks and slow case.

      // Fast check for recursive lock.
      //
      // Can apply the optimization only if this is a stack lock
      // allocated in this thread. For efficiency, we can focus on
      // recently allocated stack locks (instead of reading the stack
      // base and checking whether 'mark' points inside the current
      // thread stack):
      //  1) (mark & 3) == 0
      //  2) SP <= mark < SP + os::pagesize()
      //
      // Warning: SP + os::pagesize can overflow the stack base. We must
      // neither apply the optimization for an inflated lock allocated
      // just above the thread stack (this is why condition 1 matters)
      // nor apply the optimization if the stack lock is inside the stack
      // of another thread. The latter is avoided even in case of overflow
      // because we have guard pages at the end of all stacks. Hence, if
      // we go over the stack base and hit the stack of another thread,
      // this should not be in a writeable area that could contain a
      // stack lock allocated by that thread. As a consequence, a stack
      // lock less than page size away from SP is guaranteed to be
      // owned by the current thread.
      //
      // Note: assuming SP is aligned, we can check the low bits of
      // (mark - SP) instead of the low bits of mark.
      // In that case, assuming page size is a power of 2, we can merge
      // the two conditions into a single test:
      //   => ((mark - SP) & (3 - os::pagesize())) == 0

      // (3 - os::pagesize()) cannot be encoded as an ARM immediate operand.
      // Check independently the low bits and the distance to SP.
      // -1- test low 2 bits
      movs(R0, AsmOperand(Rmark, lsl, 30));
      // -2- test (mark - SP) if the low two bits are 0
      sub(R0, Rmark, SP, eq);
      movs(R0, AsmOperand(R0, lsr, exact_log2(os::vm_page_size())), eq);
      // If still 'eq' then recursive locking OK: store 0 into lock record
      str(R0, Address(Rlock, mark_offset), eq);

      b(done, eq);
    }

    bind(slow_case);

    // Call the runtime routine for slow case
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Pass oop, not lock, in fast lock case. call_VM wants R1 though.
      push(R1);
      mov(R1, Robj);
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj), R1);
      pop(R1);
    } else {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
    }
    bind(done);
  }
}

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument: R0: Points to BasicObjectLock structure for lock
// Throw an IllegalMonitorException if object is not locked by current thread
// Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
void InterpreterMacroAssembler::unlock_object(Register Rlock) {
  assert(Rlock == R0, "the first argument");

  if (LockingMode == LM_MONITOR) {
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
  } else {
    Label done, slow_case;

    const Register Robj = R2;
    const Register Rmark = R3;
    assert_different_registers(Robj, Rmark, Rlock, Rtemp);

    const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
    const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
    const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();

    const Register Rzero = zero_register(Rtemp);

    // Load oop into Robj
    ldr(Robj, Address(Rlock, obj_offset));

    // Free entry
    str(Rzero, Address(Rlock, obj_offset));

    if (LockingMode == LM_LIGHTWEIGHT) {

      // Check for non-symmetric locking. This is allowed by the spec and the interpreter
      // must handle it.
      ldr(Rtemp, Address(Rthread, JavaThread::lock_stack_top_offset()));
      sub(Rtemp, Rtemp, oopSize);
      ldr(Rtemp, Address(Rthread, Rtemp));
      cmpoop(Rtemp, Robj);
      b(slow_case, ne);

      lightweight_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
                         1 /* savemask (save t1) */, slow_case);

      b(done);

    } else if (LockingMode == LM_LEGACY) {

      // Load the old header from BasicLock structure
      ldr(Rmark, Address(Rlock, mark_offset));

      // Test for recursion (zero mark in BasicLock)
      cbz(Rmark, done);

      bool allow_fallthrough_on_failure = true;

      cas_for_lock_release(Rlock, Rmark, Robj, Rtemp, slow_case, allow_fallthrough_on_failure);

      b(done, eq);

    }
    bind(slow_case);

    // Call the runtime routine for slow case.
    str(Robj, Address(Rlock, obj_offset)); // restore obj
    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);

    bind(done);
  }
}

// Test the method data pointer.
// If it is null, continue at the specified label.
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ldr(mdp, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
  cbz(mdp, zero_continue);
}


// Set the method data pointer for the current bcp.
// Blows volatile registers R0-R3, Rtemp, LR.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;

  // Test MDO to avoid the call if it is null.
  ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
  cbz(Rtemp, set_mdp);

  mov(R0, Rmethod);
  mov(R1, Rbcp);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), R0, R1);
  // R0: mdi

  // mdo is guaranteed to be non-zero here, we checked for it before the call.
  ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
  add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()));
  add_ptr_scaled_int32(Rtemp, Rtemp, R0, 0);

  bind(set_mdp);
  str(Rtemp, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  save_caller_save_registers();

  const Register Rmdp = R2;
  test_method_data_pointer(Rmdp, verify_continue); // If mdp is zero, continue

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.

  ldrh(R3, Address(Rmdp, DataLayout::bci_offset()));
  ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  add(R3, R3, Rtemp);
  add(R3, R3, in_bytes(ConstMethod::codes_offset()));
  cmp(R3, Rbcp);
  b(verify_continue, eq);

  mov(R0, Rmethod);
  mov(R1, Rbcp);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R0, R1, Rmdp);

  bind(verify_continue);
  restore_caller_save_registers();
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int offset, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  assert_different_registers(mdp_in, value);
  str(value, Address(mdp_in, offset));
}


// Increments mdp data. Sets bumped_count register to adjusted counter.
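// Note on the overflow handling in the Address-based overload further below:
// the flag-setting adds/subs plus a conditionally executed correction avoids
// a branch. An increment that wraps into the sign bit (mi) is undone, so the
// counter saturates at its maximum; a decrement is undone while the result
// stays non-negative (pl), so only decrements that drive the counter negative
// persist (cf. the "zero or small negatives" comment in profile_typecheck_failed).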
void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int offset,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Counter address
  Address data(mdp_in, offset);
  assert_different_registers(mdp_in, bumped_count);

  increment_mdp_data_at(data, bumped_count, decrement);
}

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, int flag_byte_constant) {
  assert_different_registers(mdp_in, Rtemp);
  assert(ProfileInterpreter, "must be profiling interpreter");
  assert((0 < flag_byte_constant) && (flag_byte_constant < (1 << BitsPerByte)), "flag mask is out of range");

  // Set the flag
  ldrb(Rtemp, Address(mdp_in, in_bytes(DataLayout::flags_offset())));
  orr(Rtemp, Rtemp, (unsigned)flag_byte_constant);
  strb(Rtemp, Address(mdp_in, in_bytes(DataLayout::flags_offset())));
}


// Increments mdp data. Sets bumped_count register to adjusted counter.
void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  ldr(bumped_count, data);
  if (decrement) {
    // Decrement the register. Set condition codes.
    subs(bumped_count, bumped_count, DataLayout::counter_increment);
    // Avoid overflow.
    add(bumped_count, bumped_count, DataLayout::counter_increment, pl);
  } else {
    // Increment the register. Set condition codes.
    adds(bumped_count, bumped_count, DataLayout::counter_increment);
    // Avoid overflow.
    sub(bumped_count, bumped_count, DataLayout::counter_increment, mi);
  }
  str(bumped_count, data);
}


void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  assert_different_registers(mdp_in, test_value_out, value);

  ldr(test_value_out, Address(mdp_in, offset));
  cmp(test_value_out, value);

  b(not_equal_continue, ne);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp, Register reg_temp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  assert_different_registers(mdp_in, reg_temp);

  ldr(reg_temp, Address(mdp_in, offset_of_disp));
  add(mdp_in, mdp_in, reg_temp);
  str(mdp_in, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg_offset, Register reg_tmp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  assert_different_registers(mdp_in, reg_offset, reg_tmp);

  ldr(reg_tmp, Address(mdp_in, reg_offset));
  add(mdp_in, mdp_in, reg_tmp);
  str(mdp_in, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(mdp_in, mdp_in, constant);
  str(mdp_in, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
}


// Blows volatile registers R0-R3, Rtemp, LR.
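// The inline row checks in profile_ret (below) handle the common cases; on a
// miss this VM call computes the new mdp from return_bci and stores it into
// the frame's mdp slot (see InterpreterRuntime::update_mdp_for_ret).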
void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  assert_different_registers(return_bci, R0, R1, R2, R3, Rtemp);

  mov(R1, return_bci);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), R1);
}


// Sets mdp, bumped_count registers, blows Rtemp.
void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) {
  assert_different_registers(mdp, bumped_count);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch. Increment the taken count.
    increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()), bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()), Rtemp);

    bind(profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  assert_different_registers(mdp, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()), Rtemp);

    // The method data pointer needs to be updated to correspond to the next bytecode.
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));

    bind(profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_call(Register mdp) {
  assert_different_registers(mdp, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));

    bind(profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));

    bind(profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register receiver, bool receiver_can_be_null) {
  assert_different_registers(mdp, receiver, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      cbnz(receiver, not_null);
      // We are making a call. Increment the count for null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);
      b(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, Rtemp, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg_tmp,
                                        int start_row, Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0)
    return;

  assert_different_registers(receiver, mdp, reg_tmp);

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the receiver and for null.
  // Take any of three different outcomes:
  //   1. found receiver => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));

    test_mdp_data_at(mdp, recvr_offset, receiver, reg_tmp, next_test);

    // The receiver is receiver[n]. Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(mdp, count_offset, reg_tmp);
    b(done);

    bind(next_test);
    // reg_tmp now contains the receiver from the CallData.

    if (row == start_row) {
      Label found_null;
      // Failed the equality check on receiver[n]... Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          cbz(reg_tmp, found_null);
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), reg_tmp);
          b(done);
          bind(found_null);
        } else {
          cbnz(reg_tmp, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      cbz(reg_tmp, found_null);

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, mdp, reg_tmp, start_row + 1, done, is_virtual_call);

      // Found a null. Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed the receiver[start_row] is null.

  // Fill in the receiver field and increment the count.
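  // (A freshly claimed row gets its counter initialized directly to one
  // DataLayout::counter_increment unit; this is equivalent to incrementing
  // from zero but needs no read-modify-write.)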
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(mdp, recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  mov(reg_tmp, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg_tmp);
  if (start_row > 0) {
    b(done);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp,
                                                        Register reg_tmp,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  assert_different_registers(receiver, mdp, reg_tmp);

  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg_tmp, 0, done, is_virtual_call);

  bind(done);
}

// Sets mdp, blows volatile registers R0-R3, Rtemp, LR.
void InterpreterMacroAssembler::profile_ret(Register mdp, Register return_bci) {
  assert_different_registers(mdp, return_bci, Rtemp, R0, R1, R2, R3);

  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp, in_bytes(RetData::bci_offset(row)), return_bci,
                       Rtemp, next_test);

      // return_bci is equal to bci[n]. Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)), Rtemp);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp, in_bytes(RetData::bci_displacement_offset(row)), Rtemp);
      b(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}


// Sets mdp.
void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  assert_different_registers(mdp, Rtemp);

  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, Rtemp, true);

    bind(profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass)
{
  assert_different_registers(mdp, klass, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, Rtemp, false);
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  assert_different_registers(mdp, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp, in_bytes(MultiBranchData::default_count_offset()), Rtemp);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp, in_bytes(MultiBranchData::default_displacement_offset()), Rtemp);

    bind(profile_continue);
  }
}


// Sets mdp. Blows reg_tmp1, reg_tmp2. Index could be the same as reg_tmp2.
void InterpreterMacroAssembler::profile_switch_case(Register mdp, Register index, Register reg_tmp1, Register reg_tmp2) {
  assert_different_registers(mdp, reg_tmp1, reg_tmp2);
  assert_different_registers(mdp, reg_tmp1, index);

  if (ProfileInterpreter) {
    Label profile_continue;

    const int count_offset = in_bytes(MultiBranchData::case_array_offset()) +
                             in_bytes(MultiBranchData::relative_count_offset());

    const int displacement_offset = in_bytes(MultiBranchData::case_array_offset()) +
                                    in_bytes(MultiBranchData::relative_displacement_offset());

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes())
    logical_shift_left(reg_tmp1, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));

    // Update the case count
    add(reg_tmp1, reg_tmp1, count_offset);
    increment_mdp_data_at(Address(mdp, reg_tmp1), reg_tmp2);

    // The method data pointer needs to be updated.
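    // reg_tmp1 still holds index * per_case_size + count_offset from the count
    // update above; adding (displacement_offset - count_offset) retargets it
    // at the displacement field of the same case entry.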

// Sets mdp. Blows reg_tmp1, reg_tmp2. Index could be the same as reg_tmp2.
void InterpreterMacroAssembler::profile_switch_case(Register mdp, Register index, Register reg_tmp1, Register reg_tmp2) {
  assert_different_registers(mdp, reg_tmp1, reg_tmp2);
  assert_different_registers(mdp, reg_tmp1, index);

  if (ProfileInterpreter) {
    Label profile_continue;

    const int count_offset = in_bytes(MultiBranchData::case_array_offset()) +
                             in_bytes(MultiBranchData::relative_count_offset());

    const int displacement_offset = in_bytes(MultiBranchData::case_array_offset()) +
                                    in_bytes(MultiBranchData::relative_displacement_offset());

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()).
    logical_shift_left(reg_tmp1, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));

    // Update the case count.
    add(reg_tmp1, reg_tmp1, count_offset);
    increment_mdp_data_at(Address(mdp, reg_tmp1), reg_tmp2);

    // The method data pointer needs to be updated.
    add(reg_tmp1, reg_tmp1, displacement_offset - count_offset);
    update_mdp_by_offset(mdp, reg_tmp1, reg_tmp2);

    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::byteswap_u32(Register r, Register rtmp1, Register rtmp2) {
  if (VM_Version::supports_rev()) {
    rev(r, r);
  } else {
    // Byte-reverse r without the REV instruction (pre-ARMv6):
    //   rtmp1 = r ^ ror(r, 16)               -- XOR of bytes two positions apart
    //   rtmp1 = (rtmp1 lsr 8) & ~0x0000ff00  -- keep only the XOR terms needed below
    //   r     = rtmp1 ^ ror(r, 8)            -- completes the byte reversal
    eor(rtmp1, r, AsmOperand(r, ror, 16));
    mvn(rtmp2, 0x0000ff00);
    andr(rtmp1, rtmp2, AsmOperand(rtmp1, lsr, 8));
    eor(r, rtmp1, AsmOperand(r, ror, 8));
  }
}


void InterpreterMacroAssembler::inc_global_counter(address address_of_counter, int offset, Register tmp1, Register tmp2, bool avoid_overflow) {
  const intx addr = (intx) (address_of_counter + offset);

  assert((addr & 0x3) == 0, "address of counter should be aligned");
  const intx offset_mask = right_n_bits(12);

  const address base = (address) (addr & ~offset_mask);
  const int offs = (int) (addr & offset_mask);

  const Register addr_base = tmp1;
  const Register val = tmp2;

  mov_slow(addr_base, base);
  ldr_s32(val, Address(addr_base, offs));

  if (avoid_overflow) {
    // Saturating increment: store the result only while it stays non-negative.
    adds_32(val, val, 1);
    str(val, Address(addr_base, offs), pl);
  } else {
    add_32(val, val, 1);
    str_32(val, Address(addr_base, offs));
  }
}

void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char *file, int line) {
  if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop", file, line); }
}

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
//   SharedRuntime::rc_trace_method_entry(method, receiver);
// }

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit events
  // are sent to track stack depth. If it is possible to enter interp_only_mode
  // we add the code to check if the event should be sent.
  if (can_post_interpreter_events()) {
    Label L;

    ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
    cbz(Rtemp, L);

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));

    bind(L);
  }

  // Note: Disable DTrace runtime check for now to eliminate overhead on each method entry
  if (DTraceMethodProbes) {
    Label Lcontinue;

    ldrb_global(Rtemp, (address)&DTraceMethodProbes);
    cbz(Rtemp, Lcontinue);

    mov(R0, Rthread);
    mov(R1, Rmethod);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), R0, R1);

    bind(Lcontinue);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    mov(R0, Rthread);
    mov(R1, Rmethod);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
                 R0, R1);
  }
}
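
// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   // save the method result, if any
//   InterpreterRuntime::post_method_exit();
//   // restore the method result
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_exit(thread, method);
// }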

void InterpreterMacroAssembler::notify_method_exit(
                 TosState state, NotifyMethodExitMode mode,
                 bool native, Register result_lo, Register result_hi, FloatRegister result_fp) {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit events
  // are sent to track stack depth. If it is possible to enter interp_only_mode
  // we add the code to check if the event should be sent.
  if (mode == NotifyJVMTI && can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
    cbz(Rtemp, L);

    if (native) {
      // For the C++ and template interpreters, push both result registers on
      // the stack in the native case: we don't know the state.
      // See frame::interpreter_frame_result for code that gets the result values from here.
      assert(result_lo != noreg, "result registers should be defined");
      assert(result_hi != noreg, "result registers should be defined");

#ifdef __ABI_HARD__
      assert(result_fp != fnoreg, "FP result register must be defined");
      sub(SP, SP, 2 * wordSize);
      fstd(result_fp, Address(SP));
#endif // __ABI_HARD__

      push(RegisterSet(result_lo) | RegisterSet(result_hi));

      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));

      pop(RegisterSet(result_lo) | RegisterSet(result_hi));
#ifdef __ABI_HARD__
      fldd(result_fp, Address(SP));
      add(SP, SP, 2 * wordSize);
#endif // __ABI_HARD__

    } else {
      // For the template interpreter, the value on tos is the size of the
      // state. (The C++ interpreter calls jvmti somewhere else.)
      push(state);
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
      pop(state);
    }

    bind(L);
  }

  // Note: Disable DTrace runtime check for now to eliminate overhead on each method exit
  if (DTraceMethodProbes) {
    Label Lcontinue;

    ldrb_global(Rtemp, (address)&DTraceMethodProbes);
    cbz(Rtemp, Lcontinue);

    push(state);

    mov(R0, Rthread);
    mov(R1, Rmethod);

    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), R0, R1);

    pop(state);

    bind(Lcontinue);
  }
}


#ifndef PRODUCT

void InterpreterMacroAssembler::trace_state(const char* msg) {
  int push_size = save_caller_save_registers();

  Label Lcontinue;
  InlinedString Lmsg0("%s: FP=" INTPTR_FORMAT ", SP=" INTPTR_FORMAT "\n");
  InlinedString Lmsg(msg);
  InlinedAddress Lprintf((address)printf);

  ldr_literal(R0, Lmsg0);
  ldr_literal(R1, Lmsg);
  mov(R2, FP);
  add(R3, SP, push_size); // original SP (without saved registers)
  ldr_literal(Rtemp, Lprintf);
  call(Rtemp);

  b(Lcontinue);

  bind_literal(Lmsg0);
  bind_literal(Lmsg);
  bind_literal(Lprintf);

  bind(Lcontinue);

  restore_caller_save_registers();
}

#endif
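
// The helper below implements the interpreter's counter-overflow test used
// for invocation and backedge counters: bump the counter, AND it with the
// threshold mask loaded from mask_addr, and take the branch when the
// resulting condition (typically eq, i.e. masked value == 0) holds.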

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask_addr,
                                                        Register scratch, Register scratch2,
                                                        AsmCondition cond, Label* where) {
  // caution: scratch2 and base address of counter_addr can be the same
  assert_different_registers(scratch, scratch2);
  ldr_u32(scratch, counter_addr);
  add(scratch, scratch, increment);
  str_32(scratch, counter_addr);

  ldr(scratch2, mask_addr);
  andrs(scratch, scratch, scratch2);
  b(*where, cond);
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
                                                    Label& skip,
                                                    bool saveRegs,
                                                    Register reg1,
                                                    Register reg2,
                                                    Register reg3) {
  const Address method_counters(method, Method::method_counters_offset());
  Label has_counters;

  ldr(Rcounters, method_counters);
  cbnz(Rcounters, has_counters);

  if (saveRegs) {
    // Save and restore in-use caller-saved registers since they will be trashed by call_VM.
    assert(reg1 != noreg, "must specify reg1");
    assert(reg2 != noreg, "must specify reg2");
    assert(reg3 == noreg, "must not specify reg3");
    push(RegisterSet(reg1) | RegisterSet(reg2));
  }

  mov(R1, method);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);

  if (saveRegs) {
    pop(RegisterSet(reg1) | RegisterSet(reg2));
  }

  ldr(Rcounters, method_counters);
  cbz(Rcounters, skip); // No MethodCounters created, OutOfMemory

  bind(has_counters);
}
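
// A hypothetical caller (registers and offsets illustrative, not from this
// file) would pair the two helpers above roughly like this when bumping a
// method's invocation counter:
//
//   Label skip, overflow;
//   get_method_counters(Rmethod, Rcounters, skip);
//   increment_mask_and_jump(Address(Rcounters, MethodCounters::invocation_counter_offset() +
//                                   InvocationCounter::counter_offset()),
//                           InvocationCounter::count_increment,
//                           Address(Rcounters, MethodCounters::invoke_mask_offset()),
//                           Rtmp1, Rtmp2, eq, &overflow);
//   bind(skip);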