/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/powerOfTwo.hpp"

#define __ _masm->

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::lsl(3));
}

static inline Address laddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
  return Address(scratch, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  return laddress(r, scratch, _masm);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(esp, 0);
}
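
// A note on the helpers above (an observation about this code, not a
// specification): rlocals points at local slot 0 and the locals array
// grows towards lower addresses, which is why locals_index() below
// negates the bytecode's index before iaddress(Register) scales it by
// 8 (lsl(3)). Every local slot is a full 64-bit word, and
// laddress(n) == iaddress(n + 1) because a category-2 value
// (long/double) occupies two slots and is addressed through the
// higher-numbered one.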

// At top of Java expression stack which may be different than esp().  It
// isn't for category 1 objects.
static inline Address at_tos   () {
  return Address(esp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(esp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(esp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(esp, Interpreter::expr_offset_in_bytes(3));
}

static inline Address at_tos_p4() {
  return Address(esp, Interpreter::expr_offset_in_bytes(4));
}

static inline Address at_tos_p5() {
  return Address(esp, Interpreter::expr_offset_in_bytes(5));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::NE;
  case TemplateTable::not_equal    : return Assembler::EQ;
  case TemplateTable::less         : return Assembler::GE;
  case TemplateTable::less_equal   : return Assembler::GT;
  case TemplateTable::greater      : return Assembler::LE;
  case TemplateTable::greater_equal: return Assembler::LT;
  }
  ShouldNotReachHere();
  return Assembler::EQ;
}


// Miscellaneous helper routines
// Store an oop (or null) at the Address described by obj.
// If val == noreg this means store a null
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators) {
  assert(val == noreg || val == r0, "parameter is just for looks");
  __ store_heap_oop(dst, val, r10, r11, r3, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators) {
  __ load_heap_oop(dst, src, r10, r11, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no)
{
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_vputfield:
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
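      // (A zero put_code is therefore the "unresolved" state: the
      // load-acquire below pairs with the store-release performed in
      // ResolvedFieldEntry::fill_in(), so observing a non-zero code
      // here means the entry is fully published and it is safe to
      // install the fast bytecode.)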
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ load_field_entry(temp_reg, bc_reg);
      if (byte_no == f1_byte) {
        __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset())));
      } else {
        __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset())));
      }
      // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
      __ ldarb(temp_reg, temp_reg);
      __ movw(bc_reg, bc);
      __ cbzw(temp_reg, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movw(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ load_unsigned_byte(temp_reg, at_bcp(0));
    __ cmpw(temp_reg, Bytecodes::_breakpoint);
    __ br(Assembler::NE, L_fast_patch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
  __ br(Assembler::EQ, L_okay);
  __ cmpw(temp_reg, bc_reg);
  __ br(Assembler::EQ, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ strb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null()
{
  transition(vtos, atos);
  __ mov(r0, 0);
}

void TemplateTable::iconst(int value)
{
  transition(vtos, itos);
  __ mov(r0, value);
}

void TemplateTable::lconst(int value)
{
  transition(vtos, ltos);
  __ mov(r0, value);
}

void TemplateTable::fconst(int value)
{
  transition(vtos, ftos);
  switch (value) {
  case 0:
    __ fmovs(v0, 0.0);
    break;
  case 1:
    __ fmovs(v0, 1.0);
    break;
  case 2:
    __ fmovs(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value)
{
  transition(vtos, dtos);
  switch (value) {
  case 0:
    __ fmovd(v0, 0.0);
    break;
  case 1:
    __ fmovd(v0, 1.0);
    break;
  case 2:
    __ fmovd(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush()
{
  transition(vtos, itos);
  __ load_signed_byte32(r0, at_bcp(1));
}

void TemplateTable::sipush()
{
  transition(vtos, itos);
  __ load_unsigned_short(r0, at_bcp(1));
  __ revw(r0, r0);
  __ asrw(r0, r0, 16);
}

void TemplateTable::ldc(LdcType type)
{
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (is_ldc_wide(type)) {
    __ get_unsigned_2_byte_index_at_bcp(r1, 1);
  } else {
    __ load_unsigned_byte(r1, at_bcp(1));
  }
  __ get_cpool_and_tags(r2, r0);
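
  // r2 now holds the ConstantPool*, r0 the tags array. A constant pool
  // entry lives at cpool + base_offset + index * wordSize, and the tags
  // array is a u1 array indexed by the same constant pool index; the
  // two offsets below encode exactly that layout.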
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ add(r3, r1, tags_offset);
  __ lea(r3, Address(r0, r3));
  __ ldarb(r3, r3);

  // unresolved class - get the resolved class
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
  __ br(Assembler::EQ, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
  __ br(Assembler::EQ, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmp(r3, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, notClass);

  __ bind(call_ldc);
  __ mov(c_rarg1, is_ldc_wide(type) ? 1 : 0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(r0);
  __ verify_oop(r0);
  __ b(Done);

  __ bind(notClass);
  __ cmp(r3, (u1)JVM_CONSTANT_Float);
  __ br(Assembler::NE, notFloat);
  // ftos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrs(v0, Address(r1, base_offset));
  __ push_f();
  __ b(Done);

  __ bind(notFloat);

  __ cmp(r3, (u1)JVM_CONSTANT_Integer);
  __ br(Assembler::NE, notInt);

  // itos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrw(r0, Address(r1, base_offset));
  __ push_i(r0);
  __ b(Done);

  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(LdcType type)
{
  transition(vtos, atos);

  Register result = r0;
  Register tmp = r1;
  Register rarg = r2;

  int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ cbnz(result, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ mov(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);

  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
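    // (Background: a null entry in the resolved-references array means
    // "not yet resolved", so a constant that genuinely resolves to null
    // is cached as the_null_sentinel instead; here we translate the
    // sentinel back into a real null before returning it.)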
    Label notNull;

    // Stash null_sentinel address to get its value later
    __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
    __ ldr(tmp, Address(rarg));
    __ resolve_oop_handle(tmp, r5, rscratch2);
    __ cmpoop(result, tmp);
    __ br(Assembler::NE, notNull);
    __ mov(result, 0);  // null object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    // Safe to call with 0 result
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w()
{
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(r0, 1);

  __ get_cpool_and_tags(r1, r2);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ lea(r2, Address(r2, r0, Address::lsl(0)));
  __ load_unsigned_byte(r2, Address(r2, tags_offset));
  __ cmpw(r2, (int)JVM_CONSTANT_Double);
  __ br(Assembler::NE, notDouble);

  // dtos
  __ lea(r2, Address(r1, r0, Address::lsl(3)));
  __ ldrd(v0, Address(r2, base_offset));
  __ push_d();
  __ b(Done);

  __ bind(notDouble);
  __ cmpw(r2, (int)JVM_CONSTANT_Long);
  __ br(Assembler::NE, notLong);

  // ltos
  __ lea(r0, Address(r1, r0, Address::lsl(3)));
  __ ldr(r0, Address(r0, base_offset));
  __ push_l();
  __ b(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done)
{
  Register obj = r0;
  Register rarg = r1;
  Register flags = r2;
  Register off = r3;

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  __ mov(rarg, (int) bytecode());
  __ call_VM(obj, entry, rarg);

  __ get_vm_result_2(flags, rthread);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ mov(off, flags);
  __ andw(off, off, ConstantPoolCache::field_index_mask);

  const Address field(obj, off);

  // What sort of thing are we loading?
  // x86 uses a shift and mask or wings it with a shift plus assert
  // the mask is not needed.
  // aarch64 just uses bitfield extract
  __ ubfxw(flags, flags, ConstantPoolCache::tos_state_shift,
           ConstantPoolCache::tos_state_bits);

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpw(flags, itos);
      __ br(Assembler::NE, notInt);
      // itos
      __ ldrw(r0, field);
      __ push(itos);
      __ b(Done);

      __ bind(notInt);
      __ cmpw(flags, ftos);
      __ br(Assembler::NE, notFloat);
      // ftos
      __ load_float(field);
      __ push(ftos);
      __ b(Done);

      __ bind(notFloat);
      __ cmpw(flags, stos);
      __ br(Assembler::NE, notShort);
      // stos
      __ load_signed_short(r0, field);
      __ push(stos);
      __ b(Done);

      __ bind(notShort);
      __ cmpw(flags, btos);
      __ br(Assembler::NE, notByte);
      // btos
      __ load_signed_byte(r0, field);
      __ push(btos);
      __ b(Done);

      __ bind(notByte);
      __ cmpw(flags, ctos);
      __ br(Assembler::NE, notChar);
      // ctos
      __ load_unsigned_short(r0, field);
      __ push(ctos);
      __ b(Done);

      __ bind(notChar);
      __ cmpw(flags, ztos);
      __ br(Assembler::NE, notBool);
      // ztos
      __ load_signed_byte(r0, field);
      __ push(ztos);
      __ b(Done);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmpw(flags, ltos);
      __ br(Assembler::NE, notLong);
      // ltos
      __ ldr(r0, field);
      __ push(ltos);
      __ b(Done);

      __ bind(notLong);
      __ cmpw(flags, dtos);
      __ br(Assembler::NE, notDouble);
      // dtos
      __ load_double(field);
      __ push(dtos);
      __ b(Done);

      __ bind(notDouble);
      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

void TemplateTable::locals_index(Register reg, int offset)
{
  __ ldrb(reg, at_bcp(offset));
  __ neg(reg, reg);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
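    // To summarize the dispatch below:
    //   next == _iload      -> leave this bytecode alone for now
    //   next == _fast_iload -> rewrite this one to _fast_iload2
    //   next == _caload     -> rewrite this one to _fast_icaload
    //   anything else       -> rewrite this one to _fast_iload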
    __ cmpw(r1, Bytecodes::_iload);
    __ br(Assembler::EQ, done);

    // if _fast_iload rewrite to _fast_iload2
    __ cmpw(r1, Bytecodes::_fast_iload);
    __ movw(bc, Bytecodes::_fast_iload2);
    __ br(Assembler::EQ, rewrite);

    // if _caload rewrite to _fast_icaload
    __ cmpw(r1, Bytecodes::_caload);
    __ movw(bc, Bytecodes::_fast_icaload);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_iload
    __ movw(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, r1, false);
    __ bind(done);

  }

  // do iload, get the local value into tos
  locals_index(r1);
  __ ldr(r0, iaddress(r1));

}

void TemplateTable::fast_iload2()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
  __ push(itos);
  locals_index(r1, 3);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::lload()
{
  transition(vtos, ltos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::fload()
{
  transition(vtos, ftos);
  locals_index(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::dload()
{
  transition(vtos, dtos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::aload()
{
  transition(vtos, atos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ ldrh(reg, at_bcp(2));
  __ rev16w(reg, reg);
  __ neg(reg, reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::wide_lload()
{
  transition(vtos, ltos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_fload()
{
  transition(vtos, ftos);
  locals_index_wide(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::wide_dload()
{
  transition(vtos, dtos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_aload()
{
  transition(vtos, atos);
  locals_index_wide(r1);
  __ ldr(r0, aaddress(r1));
}

void TemplateTable::index_check(Register array, Register index)
{
  // destroys r1, rscratch1
  // sign extend index for use by indexed load
  // __ movl2ptr(index, index);
  // check index
  Register length = rscratch1;
  __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ cmpw(index, length);
  if (index != r1) {
    // ??? convention: move aberrant index into r1 for exception message
    assert(r1 != array, "different registers");
    __ mov(r1, index);
  }
  Label ok;
  __ br(Assembler::LO, ok);
  // ??? convention: move array into r3 for exception message
  __ mov(r3, array);
  __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  __ br(rscratch1);
  __ bind(ok);
}

void TemplateTable::iaload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::laload()
{
  transition(itos, ltos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::faload()
{
  transition(itos, ftos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::daload()
{
  transition(itos, dtos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::aaload()
{
  transition(itos, atos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ profile_array_type<ArrayLoadData>(r2, r0, r4);
  if (UseFlatArray) {
    Label is_flat_array, done;

    __ test_flat_array_oop(r0, r8 /*temp*/, is_flat_array);
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);

    __ b(done);
    __ bind(is_flat_array);
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), r0, r1);
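    // (The runtime call returns, in r0, a fresh heap-buffered copy of
    // the flat element -- flat storage holds inlined field values, not
    // oops, so there is nothing to return directly.)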
    // Ensure the stores to copy the inline field contents are visible
    // before any subsequent store that publishes this reference.
    __ membar(Assembler::StoreStore);
    __ bind(done);
  } else {
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
  }
  __ profile_element_type(r2, r0, r4);
}

void TemplateTable::baload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}

void TemplateTable::caload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload()
{
  transition(vtos, itos);
  // load index out of locals
  locals_index(r2);
  __ ldr(r1, iaddress(r2));

  __ pop_ptr(r0);

  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::saload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::iload(int n)
{
  transition(vtos, itos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::lload(int n)
{
  transition(vtos, ltos);
  __ ldr(r0, laddress(n));
}

void TemplateTable::fload(int n)
{
  transition(vtos, ftos);
  __ ldrs(v0, faddress(n));
}

void TemplateTable::dload(int n)
{
  transition(vtos, dtos);
  __ ldrd(v0, daddress(n));
}

void TemplateTable::aload(int n)
{
  transition(vtos, atos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
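  // (The reason for the delay: _getfield itself may later be rewritten
  // to one of the _fast_Xgetfield forms, and only then can this
  // _aload_0 be fused with it into a _fast_Xaccess_0 pair.)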
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpw(r1, Bytecodes::_getfield);
    __ br(Assembler::EQ, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_igetfield);
    __ movw(bc, Bytecodes::_fast_iaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_agetfield);
    __ movw(bc, Bytecodes::_fast_aaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_fgetfield);
    __ movw(bc, Bytecodes::_fast_faccess_0);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movw(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, r1, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore()
{
  transition(itos, vtos);
  locals_index(r1);
  // FIXME: We're being very pernickety here storing a jint in a
  // local with strw, which costs an extra instruction over what we'd
  // be able to do with a simple str.  We should just store the whole
  // word.
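  // (Each local slot is a full 64-bit word -- see iaddress() above --
  // so a plain str of the whole register would be safe here.)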
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::lstore()
{
  transition(ltos, vtos);
  locals_index(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::astore()
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(r1);
  __ lea(rscratch1, faddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index_wide(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg, noreg);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, is_flat_array, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ ldr(r0, at_tos());    // value
  __ ldr(r2, at_tos_p1()); // index
  __ ldr(r3, at_tos_p2()); // array

  index_check(r3, r2);     // kills r1

  __ profile_array_type<ArrayStoreData>(r4, r3, r5);
  __ profile_multiple_element_types(r4, r0, r5, r6);

  __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
  // Be careful not to clobber r4 below

  // do array store check - check for null value first
  __ cbz(r0, is_null);

  // Move array class to r5
  __ load_klass(r5, r3);

  if (UseFlatArray) {
    __ ldrw(r6, Address(r5, Klass::layout_helper_offset()));
    __ test_flat_array_layout(r6, is_flat_array);
  }

  // Move subklass into r1
  __ load_klass(r1, r0);

  // Move array element superklass into r0
  __ ldr(r0, Address(r5, ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register.  Frees r2.

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r1.

  // is "r1 <: r0" ? (value subclass <: array element superclass)
  __ gen_subtype_check(r1, ok_is_subtype, false);

  // Come here on failure
  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ ldr(r0, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, r0, IS_ARRAY);
  __ b(done);

  // Have a null in r0, r3=array, r2=index.  Store null at ary[idx]
  __ bind(is_null);
  if (EnableValhalla) {
    Label is_null_into_value_array_npe, store_null;

    // No way to store null in flat null-free array
    __ test_null_free_array_oop(r3, r8, is_null_into_value_array_npe);
    __ b(store_null);

    __ bind(is_null_into_value_array_npe);
    __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));

    __ bind(store_null);
  }

  // Store a null
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);
  __ b(done);

  if (UseFlatArray) {
    Label is_type_ok;
    __ bind(is_flat_array); // Store non-null value to flat

    // Simplistic type check...
    // r0 - value, r2 - index, r3 - array.

    // Profile the not-null value's klass.
    // Load value class
    __ load_klass(r1, r0);

    // Move element klass into r7
    __ ldr(r7, Address(r5, ArrayKlass::element_klass_offset()));

    // flat value array needs exact type match
    // is "r1 == r7" (value subclass == array element superclass)

    __ cmp(r7, r1);
    __ br(Assembler::EQ, is_type_ok);

    __ b(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

    __ bind(is_type_ok);
    // r1: value's klass
    // r3: array
    // r5: array klass
    __ test_klass_is_empty_inline_type(r1, r7, done);

    // calc dst for copy
    __ ldrw(r7, at_tos_p1()); // index
    __ data_for_value_array_index(r3, r5, r7, r7);

    // ...and src for copy
    __ ldr(r6, at_tos());  // value
    __ data_for_oop(r6, r6, r1);

    __ mov(r4, r1);  // Shuffle arguments to avoid conflict with c_rarg1
    __ access_value_copy(IN_HEAP, r6, r7, r4);
  }

  // Pop stack arguments
  __ bind(done);
  __ add(esp, esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(r2, r3);
  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
  int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
  Label L_skip;
  __ tbz(r2, diffbit_index, L_skip);
  __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg, noreg);
}

void TemplateTable::castore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg, noreg);
}

void TemplateTable::sastore()
{
  castore();
}

void TemplateTable::istore(int n)
{
  transition(itos, vtos);
  __ str(r0, iaddress(n));
}

void TemplateTable::lstore(int n)
{
  transition(ltos, vtos);
  __ str(r0, laddress(n));
}

void TemplateTable::fstore(int n)
{
  transition(ftos, vtos);
  __ strs(v0, faddress(n));
}

void TemplateTable::dstore(int n)
{
  transition(dtos, vtos);
  __ strd(v0, daddress(n));
}

void TemplateTable::astore(int n)
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  __ str(r0, iaddress(n));
}

void TemplateTable::pop()
{
  transition(vtos, vtos);
  __ add(esp, esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2()
{
  transition(vtos, vtos);
  __ add(esp, esp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup()
{
  transition(vtos, vtos);
  __ ldr(r0, Address(esp, 0));
  __ push(r0);
  // stack: ..., a, a
}

void TemplateTable::dup_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos());  // load b
  __ ldr(r2, at_tos_p1());  // load a
  __ str(r0, at_tos_p1());  // store b
  __ str(r2, at_tos());     // store a
  __ push(r0);              // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r0, at_tos());     // load c
  __ ldr(r2, at_tos_p2());  // load a
  __ str(r0, at_tos_p2());  // store c in a
  __ push(r0);              // push c
  // stack: ..., c, b, c, c
  __ ldr(r0, at_tos_p2());  // load b
  __ str(r2, at_tos_p2());  // store a in b
  // stack: ..., c, a, c, c
  __ str(r0, at_tos_p1());  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos_p1());  // load a
  __ push(r0);              // push a
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);              // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r2, at_tos());     // load c
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);              // push b
  __ push(r2);              // push c
  // stack: ..., a, b, c, b, c
  __ str(r2, at_tos_p3());  // store c in b
  // stack: ..., a, c, c, b, c
  __ ldr(r2, at_tos_p4());  // load a
  __ str(r2, at_tos_p2());  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ str(r0, at_tos_p4());  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ ldr(r2, at_tos());     // load d
  __ ldr(r0, at_tos_p1());  // load c
  __ push(r0);              // push c
  __ push(r2);              // push d
  // stack: ..., a, b, c, d, c, d
  __ ldr(r0, at_tos_p4());  // load b
  __ str(r0, at_tos_p2());  // store b in d
  __ str(r2, at_tos_p4());  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ ldr(r2, at_tos_p5());  // load a
  __ ldr(r0, at_tos_p3());  // load c
  __ str(r2, at_tos_p3());  // store a in c
  __ str(r0, at_tos_p5());  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r2, at_tos_p1());  // load a
  __ ldr(r0, at_tos());     // load b
  __ str(r2, at_tos());     // store a in b
  __ str(r0, at_tos_p1());  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op)
{
  transition(itos, itos);
  // r0 <== r1 op r0
  __ pop_i(r1);
  switch (op) {
  case add  : __ addw(r0, r1, r0); break;
  case sub  : __ subw(r0, r1, r0); break;
  case mul  : __ mulw(r0, r1, r0); break;
  case _and : __ andw(r0, r1, r0); break;
  case _or  : __ orrw(r0, r1, r0); break;
  case _xor : __ eorw(r0, r1, r0); break;
  case shl  : __ lslvw(r0, r1, r0); break;
  case shr  : __ asrvw(r0, r1, r0); break;
  case ushr : __ lsrvw(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op)
{
  transition(ltos, ltos);
  // r0 <== r1 op r0
  __ pop_l(r1);
  switch (op) {
  case add  : __ add(r0, r1, r0); break;
  case sub  : __ sub(r0, r1, r0); break;
  case mul  : __ mul(r0, r1, r0); break;
  case _and : __ andr(r0, r1, r0); break;
  case _or  : __ orr(r0, r1, r0); break;
  case _xor : __ eor(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}
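
// For the two-operand templates above, the right operand (value2) is in
// tos (r0) and the left operand (value1) is popped into r1, so the
// "r0 <== r1 op r0" convention matches the JVM spec's "value1 op value2"
// evaluation order. This matters for the non-commutative sub, the
// shifts, and the divisions below.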

void TemplateTable::idiv()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 idiv r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::irem()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 irem r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lmul()
{
  transition(ltos, ltos);
  __ pop_l(r1);
  __ mul(r0, r0, r1);
}

void TemplateTable::ldiv()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 ldiv r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::lrem()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 lrem r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lshl()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lslv(r0, r1, r0);
}

void TemplateTable::lshr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ asrv(r0, r1, r0);
}

void TemplateTable::lushr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lsrv(r0, r1, r0);
}

void TemplateTable::fop2(Operation op)
{
  transition(ftos, ftos);
  switch (op) {
  case add:
    // n.b. use ldrd because this is a 64 bit slot
    __ pop_f(v1);
    __ fadds(v0, v1, v0);
    break;
  case sub:
    __ pop_f(v1);
    __ fsubs(v0, v1, v0);
    break;
  case mul:
    __ pop_f(v1);
    __ fmuls(v0, v1, v0);
    break;
  case div:
    __ pop_f(v1);
    __ fdivs(v0, v1, v0);
    break;
  case rem:
    __ fmovs(v1, v0);
    __ pop_f(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op)
{
  transition(dtos, dtos);
  switch (op) {
  case add:
    // n.b. use ldrd because this is a 64 bit slot
    __ pop_d(v1);
    __ faddd(v0, v1, v0);
    break;
  case sub:
    __ pop_d(v1);
    __ fsubd(v0, v1, v0);
    break;
  case mul:
    __ pop_d(v1);
    __ fmuld(v0, v1, v0);
    break;
  case div:
    __ pop_d(v1);
    __ fdivd(v0, v1, v0);
    break;
  case rem:
    __ fmovd(v1, v0);
    __ pop_d(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg()
{
  transition(itos, itos);
  __ negw(r0, r0);
}

void TemplateTable::lneg()
{
  transition(ltos, ltos);
  __ neg(r0, r0);
}

void TemplateTable::fneg()
{
  transition(ftos, ftos);
  __ fnegs(v0, v0);
}

void TemplateTable::dneg()
{
  transition(dtos, dtos);
  __ fnegd(v0, v0);
}

void TemplateTable::iinc()
{
  transition(vtos, vtos);
  __ load_signed_byte(r1, at_bcp(2)); // get constant
  locals_index(r2);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::wide_iinc()
{
  transition(vtos, vtos);
  // __ mov(r1, zr);
  __ ldrw(r1, at_bcp(2)); // get constant and index
  __ rev16(r1, r1);
  __ ubfx(r2, r1, 0, 16);
  __ neg(r2, r2);
  __ sbfx(r1, r1, 16, 16);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::convert()
{
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT
  // static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ sxtw(r0, r0);
    break;
  case Bytecodes::_i2f:
    __ scvtfws(v0, r0);
    break;
  case Bytecodes::_i2d:
    __ scvtfwd(v0, r0);
    break;
  case Bytecodes::_i2b:
    __ sxtbw(r0, r0);
    break;
  case Bytecodes::_i2c:
    __ uxthw(r0, r0);
    break;
  case Bytecodes::_i2s:
    __ sxthw(r0, r0);
    break;
  case Bytecodes::_l2i:
    __ uxtw(r0, r0);
    break;
  case Bytecodes::_l2f:
    __ scvtfs(v0, r0);
    break;
  case Bytecodes::_l2d:
    __ scvtfd(v0, r0);
    break;
  case Bytecodes::_f2i:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzsw(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzs(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_f2d:
    __ fcvts(v0, v0);
    break;
  case Bytecodes::_d2i:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzdw(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzd(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_d2f:
    __ fcvtd(v0, v0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp()
{
  transition(ltos, itos);
  Label done;
  __ pop_l(r1);
  __ cmp(r1, r0);
  __ mov(r0, (uint64_t)-1L);
  __ br(Assembler::LT, done);
  // __ mov(r0, 1UL);
  // __ csel(r0, r0, zr, Assembler::NE);
  // and here is a faster way
  __ csinc(r0, zr, zr, Assembler::EQ);
  __ bind(done);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result)
{
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(v1);
    __ fcmps(v1, v0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(v1);
    __ fcmpd(v1, v0);
  }
  if (unordered_result < 0) {
    // we want -1 for unordered or less than, 0 for equal and 1 for
    // greater than.
    __ mov(r0, (uint64_t)-1L);
    // for FP LT tests less than or unordered
    __ br(Assembler::LT, done);
    // install 0 for EQ otherwise 1
    __ csinc(r0, zr, zr, Assembler::EQ);
  } else {
    // we want -1 for less than, 0 for equal and 1 for unordered or
    // greater than.
    __ mov(r0, 1L);
    // for FP HI tests greater than or unordered
    __ br(Assembler::HI, done);
    // install 0 for EQ otherwise ~0
    __ csinv(r0, zr, zr, Assembler::EQ);

  }
  __ bind(done);
}

void TemplateTable::branch(bool is_jsr, bool is_wide)
{
  __ profile_taken_branch(r0, r1);
  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // load branch displacement
  if (!is_wide) {
    __ ldrh(r2, at_bcp(1));
    __ rev16(r2, r2);
    // sign extend the 16 bit value in r2
    __ sbfm(r2, r2, 0, 15);
  } else {
    __ ldrw(r2, at_bcp(1));
    __ revw(r2, r2);
    // sign extend the 32 bit value in r2
    __ sbfm(r2, r2, 0, 31);
  }

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.

  if (is_jsr) {
    // Pre-load the next target bytecode into rscratch1
    __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
    // compute return address as bci
    __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
    __ add(rscratch2, rscratch2,
           in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
    __ sub(r1, rbcp, rscratch2);
    __ push_i(r1);
    // Adjust the bcp by the displacement in r2 (16-bit for jsr, 32-bit for jsr_w)
    __ add(rbcp, rbcp, r2);
    __ dispatch_only(vtos, /*generate_poll*/true);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp by the displacement in r2
  __ add(rbcp, rbcp, r2);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // r0: MDO
    // w1: MDO bumped taken-count
    // r2: target offset
    __ cmp(r2, zr);
    __ br(Assembler::GT, dispatch); // count only if backward branch

    // ECN: FIXME: This code smells
    // check if MethodCounters exists
    Label has_counters;
    __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
    __ cbnz(rscratch1, has_counters);
    __ push(r0);
    __ push(r1);
    __ push(r2);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
            InterpreterRuntime::build_method_counters), rmethod);
    __ pop(r2);
    __ pop(r1);
    __ pop(r0);
    __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
    __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
    __ bind(has_counters);

    Label no_mdo;
    int increment = InvocationCounter::count_increment;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
      __ cbz(r1, no_mdo);
      // Increment the MDO backedge counter
      const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
                                             in_bytes(InvocationCounter::counter_offset()));
      const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
      __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                 r0, rscratch1, false, Assembler::EQ,
                                 UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
      __ b(dispatch);
    }
    __ bind(no_mdo);
    // Increment backedge counter in MethodCounters*
    __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
    const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
    __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
                               r0, rscratch2, false, Assembler::EQ,
                               UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rscratch1
  __ load_unsigned_byte(rscratch1, Address(rbcp, 0));

  // continue with the bytecode @ target
  // rscratch1: target bytecode
  // rbcp: target bcp
  __ dispatch_only(vtos, /*generate_poll*/true);

  if (UseLoopCounter && UseOnStackReplacement) {
    // invocation counter overflow
    __ bind(backedge_counter_overflow);
    __ neg(r2, r2);
    __ add(r2, r2, rbcp);     // branch bcp
    // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::frequency_counter_overflow),
               r2);
    __ load_unsigned_byte(r1, Address(rbcp, 0));  // restore target bytecode

    // r0: osr nmethod (osr ok) or null (osr not possible)
    // w1: target bytecode
    // r2: scratch
    __ cbz(r0, dispatch);     // test result -- no osr if null
    // nmethod may have been invalidated (VM may block upon call_VM return)
    __ ldrb(r2, Address(r0, nmethod::state_offset()));
    if (nmethod::in_use != 0)
      __ sub(r2, r2, nmethod::in_use);
    __ cbnz(r2, dispatch);

    // We have the address of an on stack replacement routine in r0
    // We need to prepare to execute the OSR method. First we must
    // migrate the locals and monitors off of the stack.
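    // (SharedRuntime::OSR_migration_begin() packs the interpreter
    // frame's locals and monitors into a buffer and returns its
    // address in r0; that buffer address is the single argument the
    // OSR nmethod expects.)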

    __ mov(r19, r0);                             // save the nmethod

    call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

    // r0 is OSR buffer, move it to expected parameter location
    __ mov(j_rarg0, r0);

    // remove activation
    // get sender esp
    __ ldr(esp,
        Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
    // remove frame anchor
    __ leave();
    // Ensure compiled code always sees stack at proper alignment
    __ andr(sp, esp, -16);

    // and begin the OSR nmethod
    __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
    __ br(rscratch1);
  }
}


void TemplateTable::if_0cmp(Condition cc)
{
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  if (cc == equal)
    __ cbnzw(r0, not_taken);
  else if (cc == not_equal)
    __ cbzw(r0, not_taken);
  else {
    __ andsw(zr, r0, r0);
    __ br(j_not(cc), not_taken);
  }

  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

void TemplateTable::if_icmp(Condition cc)
{
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(r1);
  __ cmpw(r1, r0);
  __ br(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

void TemplateTable::if_nullcmp(Condition cc)
{
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  if (cc == equal)
    __ cbnz(r0, not_taken);
  else
    __ cbz(r0, not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label taken, not_taken;
  __ pop_ptr(r1);

  __ profile_acmp(r2, r1, r0, r4);

  Register is_inline_type_mask = rscratch1;
  __ mov(is_inline_type_mask, markWord::inline_type_pattern);

  if (EnableValhalla) {
    __ cmp(r1, r0);
    __ br(Assembler::EQ, (cc == equal) ? taken : not_taken);

    // might be substitutable, test if either r0 or r1 is null
    __ andr(r2, r0, r1);
    __ cbz(r2, (cc == equal) ? not_taken : taken);

    // and both are values ?
    __ ldr(r2, Address(r1, oopDesc::mark_offset_in_bytes()));
    __ andr(r2, r2, is_inline_type_mask);
    __ ldr(r4, Address(r0, oopDesc::mark_offset_in_bytes()));
    __ andr(r4, r4, is_inline_type_mask);
    __ andr(r2, r2, r4);
    __ cmp(r2, is_inline_type_mask);
    __ br(Assembler::NE, (cc == equal) ? not_taken : taken);

    // same value klass ?
    __ load_metadata(r2, r1);
    __ load_metadata(r4, r0);
    __ cmp(r2, r4);
    __ br(Assembler::NE, (cc == equal) ? not_taken : taken);

    // Know both are the same type, let's test for substitutability...
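    // (Substitutability of two value objects cannot be decided by the
    // pointer compare above, so we call into the runtime --
    // InterpreterRuntime::is_substitutable below -- which performs the
    // field-wise comparison.)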
    if (cc == equal) {
      invoke_is_substitutable(r0, r1, taken, not_taken);
    } else {
      invoke_is_substitutable(r0, r1, not_taken, taken);
    }
    __ stop("Not reachable");
  }

  __ cmpoop(r1, r0);
  __ br(j_not(cc), not_taken);
  __ bind(taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0, true);
}

void TemplateTable::invoke_is_substitutable(Register aobj, Register bobj,
                                            Label& is_subst, Label& not_subst) {
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::is_substitutable), aobj, bobj);
  // Restored... r0 answer, jmp to outcome...
  __ cbz(r0, not_subst);
  __ b(is_subst);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(r1);
  __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
  __ profile_ret(r1, r2);
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, r1));
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, /*generate_poll*/true);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(r1);
  __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
  __ profile_ret(r1, r2);
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, r1));
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, /*generate_poll*/true);
}

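
// A quick reference for the operand layout the switch templates below walk
// (per the JVMS; offsets are from the 4-byte-aligned address following the
// tableswitch/lookupswitch opcode):
//
//   tableswitch:   [+0] default offset  [+4] low  [+8] high
//                  [+12 .. +12+4*(high-low)] jump offsets
//   lookupswitch:  [+0] default offset  [+4] npairs
//                  [+8 ..] npairs of (match, offset) pairs, 8 bytes each
//
// All of these values are stored big-endian in the bytecode stream, hence
// the rev32 instructions below.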
void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align rbcp
  __ lea(r1, at_bcp(BytesPerInt));
  __ andr(r1, r1, -BytesPerInt);
  // load lo & hi
  __ ldrw(r2, Address(r1, BytesPerInt));
  __ ldrw(r3, Address(r1, 2 * BytesPerInt));
  __ rev32(r2, r2);
  __ rev32(r3, r3);
  // check against lo & hi
  __ cmpw(r0, r2);
  __ br(Assembler::LT, default_case);
  __ cmpw(r0, r3);
  __ br(Assembler::GT, default_case);
  // lookup dispatch offset
  __ subw(r0, r0, r2);
  __ lea(r3, Address(r1, r0, Address::uxtw(2)));
  __ ldrw(r3, Address(r3, 3 * BytesPerInt));
  __ profile_switch_case(r0, r1, r2);
  // continue execution
  __ bind(continue_execution);
  __ rev32(r3, r3);
  __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
  __ add(rbcp, rbcp, r3, ext::sxtw);
  __ dispatch_only(vtos, /*generate_poll*/true);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(r0);
  __ ldrw(r3, Address(r1, 0));
  __ b(continue_execution);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap r0 so we can avoid bswapping the table entries
  __ rev32(r0, r0);
  // align rbcp
  __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andr(r19, r19, -BytesPerInt);
  // set counter
  __ ldrw(r1, Address(r19, BytesPerInt));
  __ rev32(r1, r1);
  __ b(loop_entry);
  // table search
  __ bind(loop);
  __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
  __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
  __ cmpw(r0, rscratch1);
  __ br(Assembler::EQ, found);
  __ bind(loop_entry);
  __ subs(r1, r1, 1);
  __ br(Assembler::PL, loop);
  // default case
  __ profile_switch_default(r0);
  __ ldrw(r3, Address(r19, 0));
  __ b(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
  __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
  __ profile_switch_case(r1, r0, r19);
  // continue execution
  __ bind(continue_execution);
  __ rev32(r3, r3);
  __ add(rbcp, rbcp, r3, ext::sxtw);
  __ ldrb(rscratch1, Address(rbcp, 0));
  __ dispatch_only(vtos, /*generate_poll*/true);
}

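// Note on the register setup in the template below: "array" is pointed at
// the first (match, offset) pair, so the pair count sits one word before it
// at array[-4] and the default offset two words before at array[-8]; that is
// what the ldrw loads at -BytesPerInt and -2 * BytesPerInt fetch.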
void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (inexisting)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation
  const Register key   = r0; // already set (tosca)
  const Register array = r1;
  const Register i     = r2;
  const Register j     = r3;
  const Register h     = rscratch1;
  const Register temp  = rscratch2;

  // Find array start
  __ lea(array, at_bcp(3 * BytesPerInt));  // btw: should be able to
                                           // get rid of this
                                           // instruction (change
                                           // offsets below)
  __ andr(array, array, -BytesPerInt);

  // Initialize i & j
  __ mov(i, 0);                             // i = 0;
  __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);

  // Convert j into native byteordering
  __ rev32(j, j);

  // And start
  Label entry;
  __ b(entry);

  // binary search loop
  {
    Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ addw(h, i, j);                          // h = i + j;
    __ lsrw(h, h, 1);                          // h = (i + j) >> 1;
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    // Convert array[h].match to native byte-ordering before compare
    __ ldr(temp, Address(array, h, Address::lsl(3)));
    __ rev32(temp, temp);
    __ cmpw(key, temp);
    // j = h if (key <  array[h].fast_match())
    __ csel(j, h, j, Assembler::LT);
    // i = h if (key >= array[h].fast_match())
    __ csel(i, h, i, Assembler::GE);
    // while (i+1 < j)
    __ bind(entry);
    __ addw(h, i, 1);          // i+1
    __ cmpw(h, j);             // i+1 < j
    __ br(Assembler::LT, loop);
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  // Convert array[i].match to native byte-ordering before compare
  __ ldr(temp, Address(array, i, Address::lsl(3)));
  __ rev32(temp, temp);
  __ cmpw(key, temp);
  __ br(Assembler::NE, default_case);

  // entry found -> j = offset
  __ add(j, array, i, ext::uxtx, 3);
  __ ldrw(j, Address(j, BytesPerInt));
  __ profile_switch_case(i, key, array);
  __ rev32(j, j);
  __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
  __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
  __ dispatch_only(vtos, /*generate_poll*/true);

  // default case -> j = default offset
  __ bind(default_case);
  __ profile_switch_default(i);
  __ ldrw(j, Address(array, -2 * BytesPerInt));
  __ rev32(j, j);
  __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
  __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
  __ dispatch_only(vtos, /*generate_poll*/true);
}


void TemplateTable::_return(TosState state)
{
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");

    __ ldr(c_rarg1, aaddress(0));
    __ load_klass(r3, c_rarg1);
    __ ldrw(r3, Address(r3, Klass::access_flags_offset()));
    Label skip_register_finalizer;
    __ tbz(r3, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);

    __ bind(skip_register_finalizer);
  }

  // Issue a StoreStore barrier after all stores but before return
  // from any constructor for any class with a final field. We don't
  // know if this is a finalizer, so we always do so.
  if (_desc->bytecode() == Bytecodes::_return)
    __ membar(MacroAssembler::StoreStore);

  if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
    Label no_safepoint;
    __ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
    __ tbz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), no_safepoint);
    __ push(state);
    __ push_cont_fastpath(rthread);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
    __ pop_cont_fastpath(rthread);
    __ pop(state);
    __ bind(no_safepoint);
  }

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
  if (state == itos) {
    __ narrow(r0);
  }

  __ remove_activation(state);
  __ ret(lr);
}

// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order. Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
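//
// Illustrative sketch (not generated code) of why the store-load case
// needs its own barrier: with only acquire loads and release stores, and
// two Java volatiles x and y both initially zero,
//
//   // Thread 1                     // Thread 2
//   x = 1;   (store-release)        y = 1;   (store-release)
//   r1 = y;  (load-acquire)         r2 = x;  (load-acquire)
//
// the outcome r1 == 0 && r2 == 0 remains possible, because a release store
// followed by an acquire load of a *different* location may still reorder.
// Only a full StoreLoad barrier between each thread's store and its
// subsequent load rules that outcome out, which is the extra barrier
// described above.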

void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
                                                       Register Rcache,
                                                       Register index) {
  const Register temp = r19;
  assert_different_registers(Rcache, index, temp);
  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");

  Label resolved, clinit_barrier_slow;

  Bytecodes::Code code = bytecode();
  __ load_method_entry(Rcache, index);
  switch(byte_no) {
  case f1_byte:
    __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
    break;
  case f2_byte:
    __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
    break;
  }
  // Load-acquire the bytecode to match store-release in InterpreterRuntime
  __ ldarb(temp, temp);
  __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
  __ br(Assembler::EQ, resolved);

  // resolve first time through
  // Class initialization barrier slow path lands here as well.
  __ bind(clinit_barrier_slow);
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ mov(temp, (int) code);
  __ call_VM(noreg, entry, temp);

  // Update registers with resolved info
  __ load_method_entry(Rcache, index);
  // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
  // so all clients of this method must be modified accordingly
  __ bind(resolved);

  // Class initialization barrier for static methods
  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
    __ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
    __ load_method_holder(temp, temp);
    __ clinit_barrier(temp, rscratch1, nullptr, &clinit_barrier_slow);
  }
}
void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
                                                      Register Rcache,
                                                      Register index) {
  const Register temp = r19;
  assert_different_registers(Rcache, index, temp);

  Label resolved;

  Bytecodes::Code code = bytecode();
  switch (code) {
  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  default: break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ load_field_entry(Rcache, index);
  if (byte_no == f1_byte) {
    __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::get_code_offset())));
  } else {
    __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::put_code_offset())));
  }
  // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
  __ ldarb(temp, temp);
  __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
  __ br(Assembler::EQ, resolved);

  // resolve first time through
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ mov(temp, (int) code);
  __ call_VM(noreg, entry, temp);

  // Update registers with resolved info
  __ load_field_entry(Rcache, index);
  __ bind(resolved);
}

void TemplateTable::load_resolved_field_entry(Register obj,
                                              Register cache,
                                              Register tos_state,
                                              Register offset,
                                              Register flags,
                                              bool is_static = false) {
  assert_different_registers(cache, tos_state, flags, offset);

  // Field offset
  __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);

  // Flags
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())));

  // TOS state
  __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())));

  // Klass overwrite register
  if (is_static) {
    __ ldr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()));
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ldr(obj, Address(obj, mirror_offset));
    __ resolve_oop_handle(obj, r5, rscratch2);
  }
}

void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
                                                                 Register method,
                                                                 Register flags) {
  // setup registers
  const Register index = flags;
  assert_different_registers(method, cache, flags);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f1_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
  __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}

void TemplateTable::load_resolved_method_entry_handle(Register cache,
                                                      Register method,
                                                      Register ref_index,
                                                      Register flags) {
  // setup registers
  const Register index = ref_index;
  assert_different_registers(method, flags);
  assert_different_registers(method, cache, index);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f1_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));

  // maybe push appendix to arguments (just before return address)
  Label L_no_push;
  __ tbz(flags, ResolvedMethodEntry::has_appendix_shift, L_no_push);
  // invokehandle uses an index into the resolved references array
  __ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
  // Push the appendix as a trailing parameter.
  // This must be done before we get the receiver,
  // since the parameter_size includes it.
  Register appendix = method;
  __ load_resolved_reference_at_index(appendix, ref_index);
  __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
  __ bind(L_no_push);

  __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}

void TemplateTable::load_resolved_method_entry_interface(Register cache,
                                                         Register klass,
                                                         Register method_or_table_index,
                                                         Register flags) {
  // setup registers
  const Register index = method_or_table_index;
  assert_different_registers(method_or_table_index, cache, flags);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f1_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));

  // Invokeinterface can behave in different ways:
  // If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
  // behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
  // vtable index is placed in the register.
  // Otherwise, the registers will be populated with the klass and method.
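  //
  // A quick summary of the flag tests below (illustrative only; it matches
  // the generated control flow rather than adding to it):
  //
  //   if (is_forced_virtual)          // java.lang.Object method
  //     if (is_vfinal) -> Method*
  //     else           -> vtable index
  //   else             -> klass + Method* (itable dispatch happens later)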

  Label NotVirtual; Label NotVFinal; Label Done;
  __ tbz(flags, ResolvedMethodEntry::is_forced_virtual_shift, NotVirtual);
  __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
  __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
  __ b(Done);

  __ bind(NotVFinal);
  __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
  __ b(Done);

  __ bind(NotVirtual);
  __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
  __ ldr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
  __ bind(Done);
}

void TemplateTable::load_resolved_method_entry_virtual(Register cache,
                                                       Register method_or_table_index,
                                                       Register flags) {
  // setup registers
  const Register index = flags;
  assert_different_registers(method_or_table_index, cache, flags);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f2_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));

  // method_or_table_index can either be a vtable index or a method depending on the virtual final flag
  Label NotVFinal; Label Done;
  __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
  __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
  __ b(Done);

  __ bind(NotVFinal);
  __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
  __ bind(Done);
}

// The rmethod register is input and overwritten to be the adapter method for the
// indy call. Link Register (lr) is set to the return address for the adapter and
// an appendix may be pushed to the stack.
// Registers r0-r3 are clobbered.
void TemplateTable::load_invokedynamic_entry(Register method) {
  // setup registers
  const Register appendix = r0;
  const Register cache = r2;
  const Register index = r3;
  assert_different_registers(method, appendix, cache, index, rcpool);

  __ save_bcp();

  Label resolved;

  __ load_resolved_indy_entry(cache, index);
  // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
  __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
  __ ldar(method, method);

  // Compare the method to zero
  __ cbnz(method, resolved);

  Bytecodes::Code code = bytecode();

  // Call to the interpreter runtime to resolve invokedynamic
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ mov(method, code);  // this is essentially Bytecodes::_invokedynamic
  __ call_VM(noreg, entry, method);
  // Update registers with resolved info
  __ load_resolved_indy_entry(cache, index);
  // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
  __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
  __ ldar(method, method);

#ifdef ASSERT
  __ cbnz(method, resolved);
  __ stop("Should be resolved by now");
#endif // ASSERT
  __ bind(resolved);

  Label L_no_push;
  // Check if there is an appendix
  __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
  __ tbz(index, ResolvedIndyEntry::has_appendix_shift, L_no_push);

  // Get appendix
  __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
  // Push the appendix as a trailing parameter
  // since the parameter_size includes it.
  __ push(method);
  __ mov(method, index);
  __ load_resolved_reference_at_index(appendix, method);
  __ verify_oop(appendix);
  __ pop(method);
  __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
  __ bind(L_no_push);

  // compute return type
  __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
  // load return address
  // Return address is loaded into the link register (lr) and not pushed
  // to the stack as on x86
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    __ mov(rscratch1, table_addr);
    __ ldr(lr, Address(rscratch1, index, Address::lsl(3)));
  }
}

// The registers cache and index are expected to be set before the call.
// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
                                            bool is_static, bool has_tos) {
  // do the JVMTI work here to avoid disturbing the register state below
  // We use c_rarg registers here because we want to use the register used in
  // the call to the VM
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, r0);
    __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ ldrw(r0, Address(rscratch1));
    __ cbzw(r0, L1);

    __ load_field_entry(c_rarg2, index);

    if (is_static) {
      __ mov(c_rarg1, zr); // null object reference
    } else {
      __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
      __ verify_oop(c_rarg1);
    }
    // c_rarg1: object pointer or null
    // c_rarg2: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                       InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
    __ load_field_entry(cache, index);
    __ bind(L1);
  }
}

void TemplateTable::pop_and_check_object(Register r)
{
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
{
  const Register cache        = r2;
  const Register obj          = r4;
  const Register klass        = r5;
  const Register inline_klass = r7;
  const Register field_index  = r23;
  const Register index        = r3;
  const Register tos_state    = r3;
  const Register off          = r19;
  const Register flags        = r6;
  const Register bc           = r4; // uses same reg as obj, so don't mix them

  resolve_cache_and_index_for_field(byte_no, cache, index);
  jvmti_post_field_access(cache, index, is_static, false);

  // Valhalla extras
  __ load_unsigned_short(field_index, Address(cache, in_bytes(ResolvedFieldEntry::field_index_offset())));
  __ ldr(klass, Address(cache, ResolvedFieldEntry::field_holder_offset()));

  load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);

  if (!is_static) {
    // obj is on the stack
    pop_and_check_object(obj);
  }

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR. Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR. This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
    Label notVolatile;
    __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }
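
  // Illustrative instruction-level sketch of the failure mode above
  // (hand-written, not generated code). If the compiled side emits
  //
  //   stlr w8, [x_addr]       // volatile store, release semantics only
  //
  // while the interpreted side reads with
  //
  //   ldr  w9, [y_addr]       // volatile load...
  //   dmb  ishld              // ...followed by a trailing barrier
  //
  // then nothing orders the stlr against the later ldr of a different
  // location, and a Dekker-style handshake can observe both flags as zero.
  // The leading full barrier (membar AnyAny, i.e. dmb ish) before the
  // volatile load closes that window.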

  const Address field(obj, off);

  Label Done, notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  assert(btos == 0, "change code, btos != 0");
  __ cbnz(tos_state, notByte);

  // Don't rewrite getstatic, only getfield
  if (is_static) rc = may_not_rewrite;

  // btos
  __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
  __ push(btos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notByte);
  __ cmp(tos_state, (u1)ztos);
  __ br(Assembler::NE, notBool);

  // ztos (same code as btos)
  __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
  __ push(ztos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    // use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notBool);
  __ cmp(tos_state, (u1)atos);
  __ br(Assembler::NE, notObj);
  // atos
  if (!EnableValhalla) {
    do_oop_load(_masm, field, r0, IN_HEAP);
    __ push(atos);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
    }
    __ b(Done);
  } else { // Valhalla
    if (is_static) {
      __ load_heap_oop(r0, field, rscratch1, rscratch2);
      Label is_null_free_inline_type, uninitialized;
      // Issue below if the static field has not been initialized yet
      __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_null_free_inline_type);
      // field is not a null free inline type
      __ push(atos);
      __ b(Done);
      // field is a null free inline type, must not return null even if uninitialized
      __ bind(is_null_free_inline_type);
      __ cbz(r0, uninitialized);
      __ push(atos);
      __ b(Done);
      __ bind(uninitialized);
      Label slow_case, finish;
      __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
      __ cmp(rscratch1, (u1)InstanceKlass::fully_initialized);
      __ br(Assembler::NE, slow_case);
      __ get_default_value_oop(klass, off /* temp */, r0);
      __ b(finish);
      __ bind(slow_case);
      __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_inline_type_field), obj, cache);
      __ bind(finish);
      __ verify_oop(r0);
      __ push(atos);
      __ b(Done);
    } else {
      Label is_flat, nonnull, is_inline_type, has_null_marker, rewrite_inline;
      __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_inline_type);
      __ test_field_has_null_marker(flags, noreg /*temp*/, has_null_marker);
      // Non-inline field case
      __ load_heap_oop(r0, field, rscratch1, rscratch2);
      __ push(atos);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
      }
      __ b(Done);
      __ bind(is_inline_type);
      __ test_field_is_flat(flags, noreg /* temp */, is_flat);
      // field is not flat
      __ load_heap_oop(r0, field, rscratch1, rscratch2);
      __ cbnz(r0, nonnull);
      __ get_inline_type_field_klass(klass, field_index, inline_klass);
      __ get_default_value_oop(inline_klass, klass /* temp */, r0);
      __ bind(nonnull);
      __ verify_oop(r0);
      __ push(atos);
      __ b(rewrite_inline);
      __ bind(is_flat);
      // field is flat
      __ mov(r0, obj);
      __ read_flat_field(klass, field_index, off, inline_klass /* temp */, r0);
      __ verify_oop(r0);
      __ push(atos);
      __ b(rewrite_inline);
      __ bind(has_null_marker);
      call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_nullable_flat_field), obj, cache);
      __ verify_oop(r0);
      __ push(atos);
      __ bind(rewrite_inline);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_vgetfield, bc, r1);
      }
      __ b(Done);
    }
  }

  __ bind(notObj);
  __ cmp(tos_state, (u1)itos);
  __ br(Assembler::NE, notInt);
  // itos
  __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
  __ push(itos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notInt);
  __ cmp(tos_state, (u1)ctos);
  __ br(Assembler::NE, notChar);
  // ctos
  __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
  __ push(ctos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notChar);
  __ cmp(tos_state, (u1)stos);
  __ br(Assembler::NE, notShort);
  // stos
  __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
  __ push(stos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notShort);
  __ cmp(tos_state, (u1)ltos);
  __ br(Assembler::NE, notLong);
  // ltos
  __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
  __ push(ltos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notLong);
  __ cmp(tos_state, (u1)ftos);
  __ br(Assembler::NE, notFloat);
  // ftos
  __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
  __ push(ftos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notFloat);
#ifdef ASSERT
  __ cmp(tos_state, (u1)dtos);
  __ br(Assembler::NE, notDouble);
#endif
  // dtos
  __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
  __ push(dtos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
  }
#ifdef ASSERT
  __ b(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);

  Label notVolatile;
  __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
  __ bind(notVolatile);
}


void TemplateTable::getfield(int byte_no)
{
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no)
{
  getfield_or_static(byte_no, true);
}

// The registers cache and index are expected to be set before the call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
  transition(vtos, vtos);

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, r0);
    __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ ldrw(r0, Address(rscratch1));
    __ cbz(r0, L1);

    __ mov(c_rarg2, cache);

    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ mov(c_rarg1, zr);
    } else {
      // Life is harder. The stack holds the value on top, followed by
      // the object. We don't know the size of the value, though; it
      // could be one or two words depending on its type. As a result,
      // we must find the type to determine where the object is.
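      // Expression-stack picture for the two cases (an illustration, not
      // generated code; esp on the left, stack grows to the left):
      //
      //   one-word value:  [value]  [objectref] ...          -> obj at at_tos_p1()
      //   two-word value:  [word2] [word1] [objectref] ...   -> obj at at_tos_p2()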
      __ load_unsigned_byte(c_rarg3, Address(c_rarg2, in_bytes(ResolvedFieldEntry::type_offset())));
      Label nope2, done, ok;
      __ ldr(c_rarg1, at_tos_p1());   // initially assume a one word jvalue
      __ cmpw(c_rarg3, ltos);
      __ br(Assembler::EQ, ok);
      __ cmpw(c_rarg3, dtos);
      __ br(Assembler::NE, nope2);
      __ bind(ok);
      __ ldr(c_rarg1, at_tos_p2());   // ltos (two word jvalue)
      __ bind(nope2);
    }
    // object (tos)
    __ mov(c_rarg3, esp);
    // c_rarg1: object pointer set up above (null if static)
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               c_rarg1, c_rarg2, c_rarg3);
    __ load_field_entry(cache, index);
    __ bind(L1);
  }
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  const Register cache        = r2;
  const Register index        = r3;
  const Register tos_state    = r3;
  const Register obj          = r2;
  const Register off          = r19;
  const Register flags        = r6;
  const Register bc           = r4;
  const Register inline_klass = r5;

  resolve_cache_and_index_for_field(byte_no, cache, index);
  jvmti_post_field_mod(cache, index, is_static);
  load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);

  Label Done;
  {
    Label notVolatile;
    __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }

  // field address
  const Address field(obj, off);

  Label notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  assert(btos == 0, "change code, btos != 0");
  __ cbnz(tos_state, notByte);

  // Don't rewrite putstatic, only putfield
  if (is_static) rc = may_not_rewrite;

  // btos
  {
    __ pop(btos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notByte);
  __ cmp(tos_state, (u1)ztos);
  __ br(Assembler::NE, notBool);

  // ztos
  {
    __ pop(ztos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notBool);
  __ cmp(tos_state, (u1)atos);
  __ br(Assembler::NE, notObj);

  // atos
  {
    if (!EnableValhalla) {
      __ pop(atos);
      if (!is_static) pop_and_check_object(obj);
      // Store into the field
      do_oop_store(_masm, field, r0, IN_HEAP);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
      }
      __ b(Done);
    } else { // Valhalla
      __ pop(atos);
      if (is_static) {
        Label is_inline_type;
        __ test_field_is_not_null_free_inline_type(flags, noreg /* temp */, is_inline_type);
        __ null_check(r0);
        __ bind(is_inline_type);
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ b(Done);
      } else {
        Label is_inline_type, is_flat, has_null_marker, rewrite_not_inline, rewrite_inline;
        __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_inline_type);
        __ test_field_has_null_marker(flags, noreg /*temp*/, has_null_marker);
        // Not an inline type
        pop_and_check_object(obj);
        // Store into the field
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ bind(rewrite_not_inline);
        if (rc == may_rewrite) {
          patch_bytecode(Bytecodes::_fast_aputfield, bc, r19, true, byte_no);
        }
        __ b(Done);
        // Implementation of the inline type semantic
        __ bind(is_inline_type);
        __ null_check(r0);
        __ test_field_is_flat(flags, noreg /*temp*/, is_flat);
        // field is not flat
        pop_and_check_object(obj);
        // Store into the field
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ b(rewrite_inline);
        __ bind(is_flat);
        // field is flat
        pop_and_check_object(obj);
        assert_different_registers(r0, inline_klass, obj, off);
        __ load_klass(inline_klass, r0);
        __ data_for_oop(r0, r0, inline_klass);
        __ add(obj, obj, off);
        __ access_value_copy(IN_HEAP, r0, obj, inline_klass);
        __ b(rewrite_inline);
        __ bind(has_null_marker);
        assert_different_registers(r0, cache, r19);
        pop_and_check_object(r19);
        __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_nullable_flat_field), r19, r0, cache);
        __ bind(rewrite_inline);
        if (rc == may_rewrite) {
          patch_bytecode(Bytecodes::_fast_vputfield, bc, r19, true, byte_no);
        }
        __ b(Done);
      }
    } // Valhalla
  }

  __ bind(notObj);
  __ cmp(tos_state, (u1)itos);
  __ br(Assembler::NE, notInt);

  // itos
  {
    __ pop(itos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notInt);
  __ cmp(tos_state, (u1)ctos);
  __ br(Assembler::NE, notChar);

  // ctos
  {
    __ pop(ctos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notChar);
  __ cmp(tos_state, (u1)stos);
  __ br(Assembler::NE, notShort);

  // stos
  {
    __ pop(stos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notShort);
  __ cmp(tos_state, (u1)ltos);
  __ br(Assembler::NE, notLong);

  // ltos
  {
    __ pop(ltos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notLong);
  __ cmp(tos_state, (u1)ftos);
  __ br(Assembler::NE, notFloat);

  // ftos
  {
    __ pop(ftos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notFloat);
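  // In debug builds the only remaining TOS state must be dtos and is
  // verified below; in product builds the dtos case is the unchecked
  // fall-through.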
#ifdef ASSERT
  __ cmp(tos_state, (u1)dtos);
  __ br(Assembler::NE, notDouble);
#endif

  // dtos
  {
    __ pop(dtos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
    }
  }

#ifdef ASSERT
  __ b(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);

  {
    Label notVolatile;
    __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
    __ bind(notVolatile);
  }
}

void TemplateTable::putfield(int byte_no)
{
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::jvmti_post_fast_field_mod() {
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L2;
    __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ ldrw(c_rarg3, Address(rscratch1));
    __ cbzw(c_rarg3, L2);
    __ pop_ptr(r19);                  // copy the object pointer from tos
    __ verify_oop(r19);
    __ push_ptr(r19);                 // put the object pointer back on tos
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {             // load values into the jvalue object
    case Bytecodes::_fast_vputfield: // fall through
    case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(r0); break;
    case Bytecodes::_fast_dputfield: __ push_d(); break;
    case Bytecodes::_fast_fputfield: __ push_f(); break;
    case Bytecodes::_fast_lputfield: __ push_l(r0); break;

    default:
      ShouldNotReachHere();
    }
    __ mov(c_rarg3, esp);             // points to jvalue on the stack
    // access constant pool cache entry
    __ load_field_entry(c_rarg2, r0);
    __ verify_oop(r19);
    // r19: object pointer copied above
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               r19, c_rarg2, c_rarg3);

    switch (bytecode()) {             // restore tos values
    case Bytecodes::_fast_vputfield: // fall through
    case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
    case Bytecodes::_fast_dputfield: __ pop_d(); break;
    case Bytecodes::_fast_fputfield: __ pop_f(); break;
    case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
    default: break;
    }
    __ bind(L2);
  }
}

void TemplateTable::fast_storefield(TosState state)
{
  transition(state, vtos);

  ByteSize base = ConstantPoolCache::base_offset();

  jvmti_post_fast_field_mod();

  // access constant pool cache
  __ load_field_entry(r2, r1);
  __ push(r0);
  // r0: TOS state, r1: field offset, r3: flags
  load_resolved_field_entry(r2, r2, r0, r1, r3);
  __ pop(r0);

  // Must prevent reordering of the following cp cache loads with bytecode load
  __ membar(MacroAssembler::LoadLoad);

  {
    Label notVolatile;
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }

  Label notVolatile;

  // Get object from stack
  pop_and_check_object(r2);

  // field address
  const Address field(r2, r1);

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_vputfield:
    {
      Label is_flat, has_null_marker, done;
      __ test_field_has_null_marker(r3, noreg /* temp */, has_null_marker);
      __ null_check(r0);
      __ test_field_is_flat(r3, noreg /* temp */, is_flat);
      // field is not flat
      do_oop_store(_masm, field, r0, IN_HEAP);
      __ b(done);
      __ bind(is_flat);
      // field is flat
      __ load_klass(r4, r0);
      __ data_for_oop(r0, r0, r4);
      __ lea(rscratch1, field);
      __ access_value_copy(IN_HEAP, r0, rscratch1, r4);
      __ b(done);
      __ bind(has_null_marker);
      __ load_field_entry(r4, r1);
      __ mov(r1, r2);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_nullable_flat_field), r1, r0, r4);
      __ bind(done);
    }
    break;
  case Bytecodes::_fast_aputfield:
    do_oop_store(_masm, field, r0, IN_HEAP);
    break;
  case Bytecodes::_fast_lputfield:
    __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_iputfield:
    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_zputfield:
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_bputfield:
    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_sputfield:
    __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_cputfield:
    __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_fputfield:
    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_dputfield:
    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }

  {
    Label notVolatile;
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
    __ bind(notVolatile);
  }
}


void TemplateTable::fast_accessfield(TosState state)
{
  transition(atos, state);
  // Do the JVMTI work here to avoid disturbing the register state below
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ ldrw(r2, Address(rscratch1));
    __ cbzw(r2, L1);
    // access constant pool cache entry
    __ load_field_entry(c_rarg2, rscratch2);
    __ verify_oop(r0);
    __ push_ptr(r0);  // save object pointer before call_VM() clobbers it
    __ mov(c_rarg1, r0);
    // c_rarg1: object pointer copied above
    // c_rarg2: cache entry pointer
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
    __ pop_ptr(r0); // restore object pointer
    __ bind(L1);
  }

  // access constant pool cache
  __ load_field_entry(r2, r1);

  // Must prevent reordering of the following cp cache loads with bytecode load
  __ membar(MacroAssembler::LoadLoad);

  __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
  __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));

  // r0: object
  __ verify_oop(r0);
  __ null_check(r0);
  const Address field(r0, r1);

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR. Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR. This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
    Label notVolatile;
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_vgetfield:
    {
      Register index = r4, klass = r5, inline_klass = r6, tmp = r7;
      Label is_flat, has_null_marker, nonnull, Done;
      __ test_field_has_null_marker(r3, noreg /*temp*/, has_null_marker);
      __ test_field_is_flat(r3, noreg /* temp */, is_flat);
      // field is not flat
      __ load_heap_oop(r0, field, rscratch1, rscratch2);
      __ cbnz(r0, nonnull);
      __ load_unsigned_short(index, Address(r2, in_bytes(ResolvedFieldEntry::field_index_offset())));
      __ ldr(klass, Address(r2, in_bytes(ResolvedFieldEntry::field_holder_offset())));
      __ get_inline_type_field_klass(klass, index, inline_klass);
      __ get_default_value_oop(inline_klass, tmp /* temp */, r0);
      __ bind(nonnull);
      __ verify_oop(r0);
      __ b(Done);
      __ bind(is_flat);
      // field is flat
      __ load_unsigned_short(index, Address(r2, in_bytes(ResolvedFieldEntry::field_index_offset())));
      __ ldr(klass, Address(r2, in_bytes(ResolvedFieldEntry::field_holder_offset())));
      __ read_flat_field(klass, index, r1, tmp /* temp */, r0);
      __ verify_oop(r0);
      __ b(Done);
      __ bind(has_null_marker);
      call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_nullable_flat_field), r0, r2);
      __ verify_oop(r0);
      __ bind(Done);
    }
    break;
  case Bytecodes::_fast_agetfield:
    do_oop_load(_masm, field, r0, IN_HEAP);
    __ verify_oop(r0);
    break;
  case Bytecodes::_fast_lgetfield:
    __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_igetfield:
    __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_bgetfield:
    __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_sgetfield:
    __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_cgetfield:
    __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_fgetfield:
    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
    break;
  case Bytecodes::_fast_dgetfield:
    __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }
  {
    Label notVolatile;
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }
}

void TemplateTable::fast_xaccess(TosState state)
{
  transition(vtos, state);

  // get receiver
  __ ldr(r0, aaddress(0));
  // access constant pool cache
  __ load_field_entry(r2, r3, 2);
  __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR. Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR. This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
    Label notVolatile;
    __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  // make sure exception is reported in correct bcp range (getfield is
  // next instruction)
  __ increment(rbcp);
  __ null_check(r0);
  switch (state) {
  case itos:
    __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
    break;
  case atos:
    do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
    __ verify_oop(r0);
    break;
  case ftos:
    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }

  {
    Label notVolatile;
    __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }

  __ decrement(rbcp);
}



//-----------------------------------------------------------------------------
// Calls

void TemplateTable::prepare_invoke(Register cache, Register recv) {

  Bytecodes::Code code = bytecode();
  const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);

  // save 'interpreter return address'
  __ save_bcp();

  // Load TOS state for later
  __ load_unsigned_byte(rscratch2, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));
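
  // Note on the address arithmetic that follows (an explanatory note, not a
  // code change): the receiver sits num_parameters slots down the expression
  // stack, so esp + num_parameters*wordSize points just past it, and the
  // load at -expr_offset(1) from that address picks up the receiver slot.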
  // load receiver if needed (note: no return address pushed yet)
  if (load_receiver) {
    __ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
    __ add(rscratch1, esp, recv, ext::uxtx, 3);
    __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
    __ verify_oop(recv);
  }

  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    __ mov(rscratch1, table_addr);
    __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
  }
}


void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags)
{
  // Uses temporary registers r0, r3
  assert_different_registers(index, recv, r0, r3);
  // Test for an invoke of a final method
  Label notFinal;
  __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, notFinal);

  const Register method = index;  // method must be rmethod
  assert(method == rmethod,
         "Method must be rmethod for interpreter calling convention");

  // do the call - the index is actually the method to call
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*

  // It's final, need a null check here!
  __ null_check(recv);

  // profile this call
  __ profile_final_call(r0);
  __ profile_arguments_type(r0, method, r4, true);

  __ jump_from_interpreted(method, r0);

  __ bind(notFinal);

  // get receiver klass
  __ load_klass(r0, recv);

  // profile this call
  __ profile_virtual_call(r0, rlocals, r3);

  // get target Method & entry point
  __ lookup_virtual_method(r0, index, method);
  __ profile_arguments_type(r3, method, r4, true);
  // FIXME -- this looks completely redundant. is it?
  // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
  __ jump_from_interpreted(method, r3);
}

void TemplateTable::invokevirtual(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  load_resolved_method_entry_virtual(r2,      // ResolvedMethodEntry*
                                     rmethod, // Method* or vtable index
                                     r3);     // flags
  prepare_invoke(r2, r2); // recv

  // rmethod: index (actually a Method*)
  // r2: receiver
  // r3: flags

  invokevirtual_helper(rmethod, r2, r3);
}

void TemplateTable::invokespecial(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_special_or_static(r2,      // ResolvedMethodEntry*
                                               rmethod, // Method*
                                               r3);     // flags
  prepare_invoke(r2, r2);  // get receiver also for null check
  __ verify_oop(r2);
  __ null_check(r2);
  // do the call
  __ profile_call(r0);
  __ profile_arguments_type(r0, rmethod, rbcp, false);
  __ jump_from_interpreted(rmethod, r0);
}

void TemplateTable::invokestatic(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_special_or_static(r2,      // ResolvedMethodEntry*
                                               rmethod, // Method*
                                               r3);     // flags
  prepare_invoke(r2, r2);  // get receiver also for null check

  // do the call
  __ profile_call(r0);
  __ profile_arguments_type(r0, rmethod, r4, false);
  __ jump_from_interpreted(rmethod, r0);
}

void TemplateTable::fast_invokevfinal(int byte_no)
{
  __ call_Unimplemented();
}

void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_interface(r2,      // ResolvedMethodEntry*
                                       r0,      // Klass*
                                       rmethod, // Method* or itable/vtable index
                                       r3);     // flags
  prepare_invoke(r2, r2); // receiver

  // r0: interface klass (from f1)
  // rmethod: method (from f2)
  // r2: receiver
  // r3: flags

  // First check for Object case, then private interface method,
  // then regular interface method.

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See cpCache.cpp for details.
  Label notObjectMethod;
  __ tbz(r3, ResolvedMethodEntry::is_forced_virtual_shift, notObjectMethod);

  invokevirtual_helper(rmethod, r2, r3);
  __ bind(notObjectMethod);

  Label no_such_interface;

  // Check for private method invocation - indicated by vfinal
  Label notVFinal;
  __ tbz(r3, ResolvedMethodEntry::is_vfinal_shift, notVFinal);

  // Get receiver klass into r3
  __ load_klass(r3, r2);

  Label subtype;
  __ check_klass_subtype(r3, r0, r4, subtype);
  // If we get here the typecheck failed
  __ b(no_such_interface);
  __ bind(subtype);

  __ profile_final_call(r0);
  __ profile_arguments_type(r0, rmethod, r4, true);
  __ jump_from_interpreted(rmethod, r0);

  __ bind(notVFinal);

  // Get receiver klass into r3
  __ restore_locals();
  __ load_klass(r3, r2);

  Label no_such_method;

  // Preserve method for throw_AbstractMethodErrorVerbose.
  __ mov(r16, rmethod);
  // Receiver subtype check against REFC.
  // Superklass in r0. Subklass in r3. Blows rscratch2, r13
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             r3, r0, noreg,
                             // outputs: scan temp. reg, scan temp. reg
                             rscratch2, r13,
                             no_such_interface,
                             /*return_method=*/false);

  // profile this call
  __ profile_virtual_call(r3, r13, r19);

  // Get declaring interface class from method, and itable index

  __ load_method_holder(r0, rmethod);
  __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
  __ subw(rmethod, rmethod, Method::itable_index_max);
  __ negw(rmethod, rmethod);
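  // Illustrative: itable indices are stored in the Method* encoded downward
  // from the negative sentinel itable_index_max, i.e.
  // stored = itable_index_max - itable_index, so the subw/negw pair above
  // recovers itable_index = itable_index_max - stored (e.g. if
  // itable_index_max were -10, an itable index of 2 would be stored as -12).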
  // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
  __ mov(rlocals, r3);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rlocals, r0, rmethod,
                             // outputs: method, scan temp. reg
                             rmethod, r13,
                             no_such_interface);

  // rmethod: Method to call
  // r2: receiver
  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
  // interpreter entry point and a conditional jump to it in case of a null
  // method.
  __ cbz(rmethod, no_such_method);

  __ profile_arguments_type(r3, rmethod, r13, true);

  // do the call
  // r2: receiver
  // rmethod: Method
  __ jump_from_interpreted(rmethod, r3);
  __ should_not_reach_here();

  // exception handling code follows...
  // note: must restore interpreter registers to canonical
  // state for exception handling to work correctly!

  __ bind(no_such_method);
  // throw exception
  __ restore_bcp();    // bcp must be correct for exception handler (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);
  // throw exception
  __ restore_bcp();    // bcp must be correct for exception handler (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
             InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
  return;
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_handle(r2,      // ResolvedMethodEntry*
                                    rmethod, // Method*
                                    r0,      // Resolved reference
                                    r3);     // flags
  prepare_invoke(r2, r2);

  __ verify_method_ptr(r2);
  __ verify_oop(r2);
  __ null_check(r2);

  // FIXME: profile the LambdaForm also

  // r13 is safe to use here as a scratch reg because it is about to
  // be clobbered by jump_from_interpreted().
  __ profile_final_call(r13);
  __ profile_arguments_type(r13, rmethod, r4, true);

  __ jump_from_interpreted(rmethod, r0);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_invokedynamic_entry(rmethod);

  // r0: CallSite object (from cpool->resolved_references[])
  // rmethod: MH.linkToCallSite method

  // Note: r0_callsite is already pushed

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(rbcp);
  __ profile_arguments_type(r3, rmethod, r13, false);

  __ verify_oop(r0);

  __ jump_from_interpreted(rmethod, r0);
}


//-----------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);

  __ get_unsigned_2_byte_index_at_bcp(r3, 1);
  Label slow_case;
  Label done;
  Label initialize_header;

  __ get_cpool_and_tags(r4, r0);
  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the
  // order in which the constant pool is updated (see ConstantPool::klass_at_put)
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
  __ lea(rscratch1, Address(rscratch1, tags_offset));
  __ ldarb(rscratch1, rscratch1);
  __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, slow_case);

  // get InstanceKlass
  __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);

  // make sure klass is initialized
  assert(VM_Version::supports_fast_class_init_checks(), "Optimization requires support for fast class initialization checks");
  __ clinit_barrier(r4, rscratch1, nullptr /*L_fast_path*/, &slow_case);

  __ allocate_instance(r4, r0, r3, r1, true, slow_case);
  __ b(done);

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(c_rarg1);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
  __ verify_oop(r0);

  // continue
  __ bind(done);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
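  // Illustrative: on a weakly-ordered machine such as AArch64, without the
  // StoreStore barrier below the store that publishes the reference (e.g. an
  // unsynchronized field write of 'obj = new Foo()') could become visible to
  // another thread before the stores that initialize the header and fields.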
  __ membar(Assembler::StoreStore);
}

void TemplateTable::newarray() {
  transition(itos, atos);
  __ load_unsigned_byte(c_rarg1, at_bcp(1));
  __ mov(c_rarg2, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          c_rarg1, c_rarg2);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  __ mov(c_rarg3, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          c_rarg1, c_rarg2, c_rarg3);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);
  __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
}

void TemplateTable::checkcast()
{
  transition(atos, atos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quicked
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(r0, rthread);
  __ pop(r3); // restore receiver
  __ b(resolved);

  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass

  __ bind(resolved);
  __ load_klass(r19, r3);

  // Generate subtype check.  Blows r2, r5.  Object in r3.
  // Superklass in r0.  Subklass in r19.
  __ gen_subtype_check(r19, ok_is_subtype);

  // Come here on failure
  __ push(r3);
  // object is at TOS
  __ b(Interpreter::_throw_ClassCastException_entry);

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, r3); // Restore object from r3

  __ b(done);
  __ bind(is_null);
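  // Illustrative: per the JVM specification, checkcast on null always
  // succeeds (null is assignable to every reference type), so r0 keeps the
  // null and we fall through to done, optionally recording the null in the
  // method's profile.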
  // Collect counts on whether this test sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ profile_null_seen(r2);
  }

  __ bind(done);
}

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quicked
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(r0, rthread);
  __ pop(r3); // restore receiver
  __ verify_oop(r3);
  __ load_klass(r3, r3);
  __ b(resolved);

  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ load_klass(r3, r0);
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);

  __ bind(resolved);

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r3.
  __ gen_subtype_check(r3, ok_is_subtype);

  // Come here on failure
  __ mov(r0, 0);
  __ b(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, 1);

  // Collect counts on whether this test sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(r2);
  } else {
    __ bind(is_null); // same as 'done'
  }
  __ bind(done);
  // r0 = 0: obj == nullptr or obj is not an instance of the specified klass
  // r0 = 1: obj != nullptr and obj is an instance of the specified klass
}

//-----------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  // get the unpatched byte code
  __ get_method(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::get_original_bytecode_at),
             c_rarg1, rbcp);
  __ mov(r19, r0);

  // post the breakpoint event
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             rmethod, rbcp);

  // complete the execution of original bytecode
  __ mov(rscratch1, r19);
  __ dispatch_only_normal(vtos);
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(r0);
  __ b(Interpreter::throw_exception_entry());
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
// in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- esp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rfp    ] <--- rfp
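// Illustrative: for a Java 'synchronized (obj) { ... }' block javac emits a
// matching monitorenter/monitorexit pair; each lock held by the activation
// occupies one BasicObjectLock ("monitor entry") slot in the region pictured
// above, between the expression stack and the frame data.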
void TemplateTable::monitorenter()
{
  transition(atos, vtos);

  // check for null object
  __ null_check(r0);

  Label is_inline_type;
  __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
  __ test_markword_is_inline_type(rscratch1, is_inline_type);

  const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  Label allocated;

  // initialize entry pointer
  __ mov(c_rarg1, zr); // points to free slot or null

  // find a free slot in the monitor block (result in c_rarg1)
  {
    Label entry, loop, exit;
    __ ldr(c_rarg3, monitor_block_top); // derelativize pointer
    __ lea(c_rarg3, Address(rfp, c_rarg3, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg3 points to current entry, starting with top-most entry

    __ lea(c_rarg2, monitor_block_bot); // points to word before bottom

    __ b(entry);

    __ bind(loop);
    // check if current entry is used
    // if not used then remember entry in c_rarg1
    __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset()));
    __ cmp(zr, rscratch1);
    __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
    // check if current entry is for same object
    __ cmp(r0, rscratch1);
    // if same object then stop searching
    __ br(Assembler::EQ, exit);
    // otherwise advance to next entry
    __ add(c_rarg3, c_rarg3, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmp(c_rarg3, c_rarg2);
    // if not at bottom then check this entry
    __ br(Assembler::NE, loop);
    __ bind(exit);
  }

  __ cbnz(c_rarg1, allocated); // check if a slot has been found and
                               // if found, continue with that one

  // allocate one if there's no free slot
  {
    Label entry, loop;
    // 1. compute new pointers            // rsp: old expression stack top

    __ check_extended_sp();
    __ sub(sp, sp, entry_size);           // make room for the monitor
    __ sub(rscratch1, sp, rfp);
    __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
    __ str(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));

    __ ldr(c_rarg1, monitor_block_bot);   // derelativize pointer
    __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to the old expression stack bottom

    __ sub(esp, esp, entry_size);         // move expression stack top
    __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
    __ mov(c_rarg3, esp);                 // set start value for copy loop
    __ sub(rscratch1, c_rarg1, rfp);      // relativize pointer
    __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
    __ str(rscratch1, monitor_block_bot); // set new monitor block bottom

    __ b(entry);
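    // Illustrative: the new monitor entry is carved out at the old expression
    // stack bottom, so every live expression stack word must slide down by
    // entry_size bytes; step 2 below copies each word from its old location
    // to its new one, starting at the (already adjusted) stack top.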
    // 2. move expression stack contents
    __ bind(loop);
    __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
                                                   // word from old location
    __ str(c_rarg2, Address(c_rarg3, 0));          // and store it at new location
    __ add(c_rarg3, c_rarg3, wordSize);            // advance to next word
    __ bind(entry);
    __ cmp(c_rarg3, c_rarg1);   // check if bottom reached
    __ br(Assembler::NE, loop); // if not at bottom then copy next word
  }

  // call run-time routine
  // c_rarg1: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions work correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(rbcp);

  // store object
  __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset()));
  __ lock_object(c_rarg1);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp(); // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);

  __ bind(is_inline_type);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                    InterpreterRuntime::throw_identity_exception), r0);
  __ should_not_reach_here();
}


void TemplateTable::monitorexit()
{
  transition(atos, vtos);

  // check for null object
  __ null_check(r0);

  const int is_inline_type_mask = markWord::inline_type_pattern;
  Label has_identity;
  __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
  __ mov(rscratch2, is_inline_type_mask);
  __ andr(rscratch1, rscratch1, rscratch2);
  __ cmp(rscratch1, rscratch2);
  __ br(Assembler::NE, has_identity);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                    InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();
  __ bind(has_identity);

  const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  Label found;

  // find matching slot
  {
    Label entry, loop;
    __ ldr(c_rarg1, monitor_block_top); // derelativize pointer
    __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to current entry, starting with top-most entry

    __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
                                        // of monitor block
    __ b(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset()));
    __ cmp(r0, rscratch1);
    // if same object then stop searching
    __ br(Assembler::EQ, found);
    // otherwise advance to next entry
    __ add(c_rarg1, c_rarg1, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmp(c_rarg1, c_rarg2);
    // if not at bottom then check this entry
    __ br(Assembler::NE, loop);
  }
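  // Illustrative: falling out of the search loop means no monitor entry in
  // this frame refers to the object, e.g. code that unlocks an object it
  // never locked; this is rejected below instead of being silently ignored.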
  // error handling. Unlocking was not block-structured
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                    InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  __ bind(found);
  __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
  __ unlock_object(c_rarg1);
  __ pop_ptr(r0);  // discard object
}


// Wide instructions
void TemplateTable::wide()
{
  __ load_unsigned_byte(r19, at_bcp(1));
  __ mov(rscratch1, (address)Interpreter::_wentry_point);
  __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
  __ br(rscratch1);
}


// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  // first_addr = last_addr + (ndims - 1) * wordSize
  __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
  __ sub(c_rarg1, c_rarg1, wordSize);
  call_VM(r0,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
          c_rarg1);
  __ load_unsigned_byte(r1, at_bcp(3));
  __ lea(esp, Address(esp, r1, Address::uxtw(3)));
}
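// Illustrative example for the address computation in multianewarray above:
// 'new int[2][3]' compiles to
//   iconst_2; iconst_3; multianewarray #cpIndex, 2
// so on entry ndims == 2, the last dimension (3) sits at esp and the first
// dimension (2) at esp + (ndims - 1) * wordSize, which is the first_addr
// handed to InterpreterRuntime::multianewarray; the final lea then pops all
// ndims dimension words off the expression stack.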