/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/powerOfTwo.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::lsl(3));
}

static inline Address laddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
  return Address(scratch, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  return laddress(r, scratch, _masm);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(esp, 0);
}
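// Note on the register-indexed forms above: locals_index() (below) negates
// the slot number it reads from the bytecode stream, so iaddress(Register)
// resolves to rlocals - 8 * slot -- local 0 lives at the highest address and
// the locals area grows toward lower addresses. laddress(n) maps to slot
// n + 1 because a long occupies two slots and its value is kept in the
// higher-numbered one.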
// At top of Java expression stack which may be different than esp().  It
// isn't for category 1 objects.
static inline Address at_tos   () {
  return Address(esp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(esp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(esp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(esp, Interpreter::expr_offset_in_bytes(3));
}

static inline Address at_tos_p4() {
  return Address(esp, Interpreter::expr_offset_in_bytes(4));
}

static inline Address at_tos_p5() {
  return Address(esp, Interpreter::expr_offset_in_bytes(5));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::NE;
  case TemplateTable::not_equal    : return Assembler::EQ;
  case TemplateTable::less         : return Assembler::GE;
  case TemplateTable::less_equal   : return Assembler::GT;
  case TemplateTable::greater      : return Assembler::LE;
  case TemplateTable::greater_equal: return Assembler::LT;
  }
  ShouldNotReachHere();
  return Assembler::EQ;
}


// Miscellaneous helper routines
// Store an oop (or null) at the Address described by obj.
// If val == noreg this means store a null
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators) {
  assert(val == noreg || val == r0, "parameter is just for looks");
  __ store_heap_oop(dst, val, r10, r11, r3, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators) {
  __ load_heap_oop(dst, src, r10, r11, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}
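// Bytecode quickening: once a bytecode's slow path has done its one-time
// work (e.g. resolving a constant-pool entry), patch_bytecode() overwrites
// the instruction in the method's bytecode stream with a _fast_ variant so
// later executions skip that work. The putfield cases below defer patching
// until the resolved entry's put_code has been filled in.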
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no)
{
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_vputfield:
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ load_field_entry(temp_reg, bc_reg);
      if (byte_no == f1_byte) {
        __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset())));
      } else {
        __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset())));
      }
      // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
      __ ldarb(temp_reg, temp_reg);
      __ movw(bc_reg, bc);
      __ cbzw(temp_reg, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movw(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ load_unsigned_byte(temp_reg, at_bcp(0));
    __ cmpw(temp_reg, Bytecodes::_breakpoint);
    __ br(Assembler::NE, L_fast_patch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
  __ br(Assembler::EQ, L_okay);
  __ cmpw(temp_reg, bc_reg);
  __ br(Assembler::EQ, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ strb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null()
{
  transition(vtos, atos);
  __ mov(r0, 0);
}

void TemplateTable::iconst(int value)
{
  transition(vtos, itos);
  __ mov(r0, value);
}

void TemplateTable::lconst(int value)
{
  transition(vtos, ltos);
  __ mov(r0, value);
}

void TemplateTable::fconst(int value)
{
  transition(vtos, ftos);
  switch (value) {
  case 0:
    __ fmovs(v0, 0.0);
    break;
  case 1:
    __ fmovs(v0, 1.0);
    break;
  case 2:
    __ fmovs(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value)
{
  transition(vtos, dtos);
  switch (value) {
  case 0:
    __ fmovd(v0, 0.0);
    break;
  case 1:
    __ fmovd(v0, 1.0);
    break;
  case 2:
    __ fmovd(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush()
{
  transition(vtos, itos);
  __ load_signed_byte32(r0, at_bcp(1));
}

void TemplateTable::sipush()
{
  transition(vtos, itos);
  __ load_unsigned_short(r0, at_bcp(1));
  __ revw(r0, r0);
  __ asrw(r0, r0, 16);
}
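// n.b. multi-byte bytecode operands are big-endian. On this little-endian
// target sipush loads the 16-bit operand, byte-reverses the whole word
// (revw) so the operand lands in the high half, then arithmetic-shifts it
// back down to sign-extend it. Wider operands use rev16/rev32 the same way.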
void TemplateTable::ldc(LdcType type)
{
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (is_ldc_wide(type)) {
    __ get_unsigned_2_byte_index_at_bcp(r1, 1);
  } else {
    __ load_unsigned_byte(r1, at_bcp(1));
  }
  __ get_cpool_and_tags(r2, r0);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ add(r3, r1, tags_offset);
  __ lea(r3, Address(r0, r3));
  __ ldarb(r3, r3);

  // unresolved class - get the resolved class
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
  __ br(Assembler::EQ, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
  __ br(Assembler::EQ, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmp(r3, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, notClass);

  __ bind(call_ldc);
  __ mov(c_rarg1, is_ldc_wide(type) ? 1 : 0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(r0);
  __ verify_oop(r0);
  __ b(Done);

  __ bind(notClass);
  __ cmp(r3, (u1)JVM_CONSTANT_Float);
  __ br(Assembler::NE, notFloat);
  // ftos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrs(v0, Address(r1, base_offset));
  __ push_f();
  __ b(Done);

  __ bind(notFloat);

  __ cmp(r3, (u1)JVM_CONSTANT_Integer);
  __ br(Assembler::NE, notInt);

  // itos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrw(r0, Address(r1, base_offset));
  __ push_i(r0);
  __ b(Done);

  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}
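// Resolved oop constants (String, Class, MethodType, ...) are cached in the
// resolved-references array. A null entry means "not yet resolved"; a condy
// constant that legitimately resolved to null is therefore stored as the
// distinguished the_null_sentinel oop, which fast_aldc maps back to a real
// null after the cache hit.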
// Fast path for caching oop constants.
void TemplateTable::fast_aldc(LdcType type)
{
  transition(vtos, atos);

  Register result = r0;
  Register tmp = r1;
  Register rarg = r2;

  int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ cbnz(result, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ mov(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);

  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;

    // Stash null_sentinel address to get its value later
    __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
    __ ldr(tmp, Address(rarg));
    __ resolve_oop_handle(tmp, r5, rscratch2);
    __ cmpoop(result, tmp);
    __ br(Assembler::NE, notNull);
    __ mov(result, 0);  // null object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    // Safe to call with 0 result
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w()
{
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(r0, 1);

  __ get_cpool_and_tags(r1, r2);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ lea(r2, Address(r2, r0, Address::lsl(0)));
  __ load_unsigned_byte(r2, Address(r2, tags_offset));
  __ cmpw(r2, (int)JVM_CONSTANT_Double);
  __ br(Assembler::NE, notDouble);

  // dtos
  __ lea(r2, Address(r1, r0, Address::lsl(3)));
  __ ldrd(v0, Address(r2, base_offset));
  __ push_d();
  __ b(Done);

  __ bind(notDouble);
  __ cmpw(r2, (int)JVM_CONSTANT_Long);
  __ br(Assembler::NE, notLong);

  // ltos
  __ lea(r0, Address(r1, r0, Address::lsl(3)));
  __ ldr(r0, Address(r0, base_offset));
  __ push_l();
  __ b(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}
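// condy (JVM_CONSTANT_Dynamic, JEP 309) constants can have any basic type,
// so the unquickened ldc/ldc_w/ldc2_w paths fall through to condy_helper,
// which asks the runtime to resolve the entry and then pushes the result
// according to the tos state encoded in the returned flags word.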
void TemplateTable::condy_helper(Label& Done)
{
  Register obj = r0;
  Register rarg = r1;
  Register flags = r2;
  Register off = r3;

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  __ mov(rarg, (int) bytecode());
  __ call_VM(obj, entry, rarg);

  __ get_vm_result_2(flags, rthread);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ mov(off, flags);
  __ andw(off, off, ConstantPoolCache::field_index_mask);

  const Address field(obj, off);

  // What sort of thing are we loading?
  // x86 uses a shift and mask, or wings it with a shift plus an assert
  // that the mask is not needed; aarch64 just uses a bitfield extract.
  __ ubfxw(flags, flags, ConstantPoolCache::tos_state_shift,
           ConstantPoolCache::tos_state_bits);

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpw(flags, itos);
      __ br(Assembler::NE, notInt);
      // itos
      __ ldrw(r0, field);
      __ push(itos);
      __ b(Done);

      __ bind(notInt);
      __ cmpw(flags, ftos);
      __ br(Assembler::NE, notFloat);
      // ftos
      __ load_float(field);
      __ push(ftos);
      __ b(Done);

      __ bind(notFloat);
      __ cmpw(flags, stos);
      __ br(Assembler::NE, notShort);
      // stos
      __ load_signed_short(r0, field);
      __ push(stos);
      __ b(Done);

      __ bind(notShort);
      __ cmpw(flags, btos);
      __ br(Assembler::NE, notByte);
      // btos
      __ load_signed_byte(r0, field);
      __ push(btos);
      __ b(Done);

      __ bind(notByte);
      __ cmpw(flags, ctos);
      __ br(Assembler::NE, notChar);
      // ctos
      __ load_unsigned_short(r0, field);
      __ push(ctos);
      __ b(Done);

      __ bind(notChar);
      __ cmpw(flags, ztos);
      __ br(Assembler::NE, notBool);
      // ztos
      __ load_signed_byte(r0, field);
      __ push(ztos);
      __ b(Done);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmpw(flags, ltos);
      __ br(Assembler::NE, notLong);
      // ltos
      __ ldr(r0, field);
      __ push(ltos);
      __ b(Done);

      __ bind(notLong);
      __ cmpw(flags, dtos);
      __ br(Assembler::NE, notDouble);
      // dtos
      __ load_double(field);
      __ push(dtos);
      __ b(Done);

      __ bind(notDouble);
      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

void TemplateTable::locals_index(Register reg, int offset)
{
  __ ldrb(reg, at_bcp(offset));
  __ neg(reg, reg);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}
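// iload pair rewriting (RewriteFrequentPairs): an iload whose successor is
// another iload is left alone on the first pass; once the successor has
// itself been rewritten to _fast_iload, re-execution rewrites this one to
// _fast_iload2. An iload followed by caload becomes _fast_icaload, and
// anything else becomes plain _fast_iload.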
void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpw(r1, Bytecodes::_iload);
    __ br(Assembler::EQ, done);

    // if _fast_iload rewrite to _fast_iload2
    __ cmpw(r1, Bytecodes::_fast_iload);
    __ movw(bc, Bytecodes::_fast_iload2);
    __ br(Assembler::EQ, rewrite);

    // if _caload rewrite to _fast_icaload
    __ cmpw(r1, Bytecodes::_caload);
    __ movw(bc, Bytecodes::_fast_icaload);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_iload
    __ movw(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, r1, false);
    __ bind(done);

  }

  // do iload, get the local value into tos
  locals_index(r1);
  __ ldr(r0, iaddress(r1));

}

void TemplateTable::fast_iload2()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
  __ push(itos);
  locals_index(r1, 3);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::lload()
{
  transition(vtos, ltos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::fload()
{
  transition(vtos, ftos);
  locals_index(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::dload()
{
  transition(vtos, dtos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::aload()
{
  transition(vtos, atos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ ldrh(reg, at_bcp(2));
  __ rev16w(reg, reg);
  __ neg(reg, reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::wide_lload()
{
  transition(vtos, ltos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_fload()
{
  transition(vtos, ftos);
  locals_index_wide(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::wide_dload()
{
  transition(vtos, dtos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_aload()
{
  transition(vtos, atos);
  locals_index_wide(r1);
  __ ldr(r0, aaddress(r1));
}
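// index_check conventions: on failure the aberrant index is left in r1 and
// the array oop in r3 for the exception message. The typed array loads that
// follow fold the array header into the index first --
// add(index, index, base_offset >> shift) followed by a uxtw(shift) scaled
// address computes array + base_offset + index * element_size in two
// instructions (so base_offset must be element-size aligned).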
void TemplateTable::index_check(Register array, Register index)
{
  // destroys r1, rscratch1
  // sign extend index for use by indexed load
  // __ movl2ptr(index, index);
  // check index
  Register length = rscratch1;
  __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ cmpw(index, length);
  if (index != r1) {
    // ??? convention: move aberrant index into r1 for exception message
    assert(r1 != array, "different registers");
    __ mov(r1, index);
  }
  Label ok;
  __ br(Assembler::LO, ok);
  // ??? convention: move array into r3 for exception message
  __ mov(r3, array);
  __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  __ br(rscratch1);
  __ bind(ok);
}

void TemplateTable::iaload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::laload()
{
  transition(itos, ltos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::faload()
{
  transition(itos, ftos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::daload()
{
  transition(itos, dtos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}
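// aaload must cope with Valhalla flat arrays: a flat (inline-type element)
// array has no element oops to load, so in that case the element is read
// out via InterpreterRuntime::flat_array_load, which materializes a heap
// instance from the flattened storage.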
void TemplateTable::aaload()
{
  transition(itos, atos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ profile_array_type<ArrayLoadData>(r2, r0, r4);
  if (UseFlatArray) {
    Label is_flat_array, done;

    __ test_flat_array_oop(r0, r8 /*temp*/, is_flat_array);
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);

    __ b(done);
    __ bind(is_flat_array);
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_load), r0, r1);
    // Ensure the stores to copy the inline field contents are visible
    // before any subsequent store that publishes this reference.
    __ membar(Assembler::StoreStore);
    __ bind(done);
  } else {
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
  }
  __ profile_element_type(r2, r0, r4);
}

void TemplateTable::baload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}

void TemplateTable::caload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload()
{
  transition(vtos, itos);
  // load index out of locals
  locals_index(r2);
  __ ldr(r1, iaddress(r2));

  __ pop_ptr(r0);

  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::saload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::iload(int n)
{
  transition(vtos, itos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::lload(int n)
{
  transition(vtos, ltos);
  __ ldr(r0, laddress(n));
}

void TemplateTable::fload(int n)
{
  transition(vtos, ftos);
  __ ldrs(v0, faddress(n));
}

void TemplateTable::dload(int n)
{
  transition(vtos, dtos);
  __ ldrd(v0, daddress(n));
}

void TemplateTable::aload(int n)
{
  transition(vtos, atos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}
void TemplateTable::aload_0_internal(RewriteControl rc) {
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpw(r1, Bytecodes::_getfield);
    __ br(Assembler::EQ, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_igetfield);
    __ movw(bc, Bytecodes::_fast_iaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_agetfield);
    __ movw(bc, Bytecodes::_fast_aaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_fgetfield);
    __ movw(bc, Bytecodes::_fast_faccess_0);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movw(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, r1, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore()
{
  transition(itos, vtos);
  locals_index(r1);
  // FIXME: We're being very pernickety here storing a jint in a
  // local with strw, which costs an extra instruction over what we'd
  // be able to do with a simple str. We should just store the whole
  // word.
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}
void TemplateTable::lstore()
{
  transition(ltos, vtos);
  locals_index(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::astore()
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(r1);
  __ lea(rscratch1, faddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index_wide(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg, noreg);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg, noreg);
}
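// aastore is the heavyweight array store: a non-null value must pass a
// subtype check against the array's element klass before the covariant
// store is allowed; a null store must be rejected for null-free (Valhalla)
// arrays; and stores into flat arrays are handed off to the runtime.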
void TemplateTable::aastore() {
  Label is_null, is_flat_array, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ ldr(r0, at_tos());    // value
  __ ldr(r2, at_tos_p1()); // index
  __ ldr(r3, at_tos_p2()); // array

  index_check(r3, r2);     // kills r1

  __ profile_array_type<ArrayStoreData>(r4, r3, r5);
  __ profile_multiple_element_types(r4, r0, r5, r6);

  __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
  // Be careful not to clobber r4 below

  // do array store check - check for null value first
  __ cbz(r0, is_null);

  // Move array class to r5
  __ load_klass(r5, r3);

  if (UseFlatArray) {
    __ ldrw(r6, Address(r5, Klass::layout_helper_offset()));
    __ test_flat_array_layout(r6, is_flat_array);
  }

  // Move subklass into r1
  __ load_klass(r1, r0);

  // Move array element superklass into r0
  __ ldr(r0, Address(r5, ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register.  Frees r2.

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r1.

  // is "r1 <: r0" ? (value subclass <: array element superclass)
  __ gen_subtype_check(r1, ok_is_subtype, false);

  // Come here on failure
  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ ldr(r0, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, r0, IS_ARRAY);
  __ b(done);

  // Have a null in r0, r3=array, r2=index.  Store null at ary[idx]
  __ bind(is_null);
  if (EnableValhalla) {
    Label is_null_into_value_array_npe, store_null;

    if (UseFlatArray) {
      __ test_flat_array_oop(r3, r8, is_flat_array);
    }

    // No way to store null in a null-free array
    __ test_null_free_array_oop(r3, r8, is_null_into_value_array_npe);
    __ b(store_null);

    __ bind(is_null_into_value_array_npe);
    __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));

    __ bind(store_null);
  }

  // Store a null
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);
  __ b(done);

  if (UseFlatArray) {
    Label is_type_ok;
    __ bind(is_flat_array); // Store non-null value to flat

    __ ldr(r0, at_tos());    // value
    __ ldr(r3, at_tos_p1()); // index
    __ ldr(r2, at_tos_p2()); // array
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_store), r0, r2, r3);
  }

  // Pop stack arguments
  __ bind(done);
  __ add(esp, esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(r2, r3);
  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
  int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
  Label L_skip;
  __ tbz(r2, diffbit_index, L_skip);
  __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg, noreg);
}
void TemplateTable::castore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg, noreg);
}

void TemplateTable::sastore()
{
  castore();
}

void TemplateTable::istore(int n)
{
  transition(itos, vtos);
  __ str(r0, iaddress(n));
}

void TemplateTable::lstore(int n)
{
  transition(ltos, vtos);
  __ str(r0, laddress(n));
}

void TemplateTable::fstore(int n)
{
  transition(ftos, vtos);
  __ strs(v0, faddress(n));
}

void TemplateTable::dstore(int n)
{
  transition(dtos, vtos);
  __ strd(v0, daddress(n));
}

void TemplateTable::astore(int n)
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  __ str(r0, iaddress(n));
}

void TemplateTable::pop()
{
  transition(vtos, vtos);
  __ add(esp, esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2()
{
  transition(vtos, vtos);
  __ add(esp, esp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup()
{
  transition(vtos, vtos);
  __ ldr(r0, Address(esp, 0));
  __ push(r0);
  // stack: ..., a, a
}

void TemplateTable::dup_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos());     // load b
  __ ldr(r2, at_tos_p1());  // load a
  __ str(r0, at_tos_p1());  // store b
  __ str(r2, at_tos());     // store a
  __ push(r0);              // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r0, at_tos());     // load c
  __ ldr(r2, at_tos_p2());  // load a
  __ str(r0, at_tos_p2());  // store c in a
  __ push(r0);              // push c
  // stack: ..., c, b, c, c
  __ ldr(r0, at_tos_p2());  // load b
  __ str(r2, at_tos_p2());  // store a in b
  // stack: ..., c, a, c, c
  __ str(r0, at_tos_p1());  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos_p1());  // load a
  __ push(r0);              // push a
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);              // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r2, at_tos());     // load c
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);              // push b
  __ push(r2);              // push c
  // stack: ..., a, b, c, b, c
  __ str(r2, at_tos_p3());  // store c in b
  // stack: ..., a, c, c, b, c
  __ ldr(r2, at_tos_p4());  // load a
  __ str(r2, at_tos_p2());  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ str(r0, at_tos_p4());  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ ldr(r2, at_tos());     // load d
  __ ldr(r0, at_tos_p1());  // load c
  __ push(r0);              // push c
  __ push(r2);              // push d
  // stack: ..., a, b, c, d, c, d
  __ ldr(r0, at_tos_p4());  // load b
  __ str(r0, at_tos_p2());  // store b in d
  __ str(r2, at_tos_p4());  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ ldr(r2, at_tos_p5());  // load a
  __ ldr(r0, at_tos_p3());  // load c
  __ str(r2, at_tos_p3());  // store a in c
  __ str(r0, at_tos_p5());  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r2, at_tos_p1());  // load a
  __ ldr(r0, at_tos());     // load b
  __ str(r2, at_tos());     // store a in b
  __ str(r0, at_tos_p1());  // store b in a
  // stack: ..., b, a
}
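// Binary int/long ops: the right operand arrives in r0 (tos), the left is
// popped into r1, hence r0 <== r1 op r0. For the shifts, lslvw/asrvw/lsrvw
// use the count modulo 32 (the 64-bit forms use it modulo 64), which
// matches the Java language's masking of shift distances.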
void TemplateTable::iop2(Operation op)
{
  transition(itos, itos);
  // r0 <== r1 op r0
  __ pop_i(r1);
  switch (op) {
  case add  : __ addw(r0, r1, r0); break;
  case sub  : __ subw(r0, r1, r0); break;
  case mul  : __ mulw(r0, r1, r0); break;
  case _and : __ andw(r0, r1, r0); break;
  case _or  : __ orrw(r0, r1, r0); break;
  case _xor : __ eorw(r0, r1, r0); break;
  case shl  : __ lslvw(r0, r1, r0); break;
  case shr  : __ asrvw(r0, r1, r0); break;
  case ushr : __ lsrvw(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op)
{
  transition(ltos, ltos);
  // r0 <== r1 op r0
  __ pop_l(r1);
  switch (op) {
  case add  : __ add(r0, r1, r0); break;
  case sub  : __ sub(r0, r1, r0); break;
  case mul  : __ mul(r0, r1, r0); break;
  case _and : __ andr(r0, r1, r0); break;
  case _or  : __ orr(r0, r1, r0); break;
  case _xor : __ eor(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}
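// AArch64 integer division does not trap: sdiv by zero yields 0. The
// division templates therefore test the divisor explicitly and branch to
// the shared ArithmeticException entry; corrected_idivl/corrected_idivq
// then compute either the quotient or the remainder with Java semantics
// (MIN_VALUE / -1 == MIN_VALUE, remainder 0).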
void TemplateTable::idiv()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 idiv r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::irem()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 irem r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lmul()
{
  transition(ltos, ltos);
  __ pop_l(r1);
  __ mul(r0, r0, r1);
}

void TemplateTable::ldiv()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 ldiv r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::lrem()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 lrem r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lshl()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lslv(r0, r1, r0);
}

void TemplateTable::lshr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ asrv(r0, r1, r0);
}

void TemplateTable::lushr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lsrv(r0, r1, r0);
}

void TemplateTable::fop2(Operation op)
{
  transition(ftos, ftos);
  switch (op) {
  case add:
    // n.b. the operand still occupies a 64 bit stack slot; pop_f loads
    // only the low 32 bits
    __ pop_f(v1);
    __ fadds(v0, v1, v0);
    break;
  case sub:
    __ pop_f(v1);
    __ fsubs(v0, v1, v0);
    break;
  case mul:
    __ pop_f(v1);
    __ fmuls(v0, v1, v0);
    break;
  case div:
    __ pop_f(v1);
    __ fdivs(v0, v1, v0);
    break;
  case rem:
    __ fmovs(v1, v0);
    __ pop_f(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op)
{
  transition(dtos, dtos);
  switch (op) {
  case add:
    // n.b. use ldrd because this is a 64 bit slot
    __ pop_d(v1);
    __ faddd(v0, v1, v0);
    break;
  case sub:
    __ pop_d(v1);
    __ fsubd(v0, v1, v0);
    break;
  case mul:
    __ pop_d(v1);
    __ fmuld(v0, v1, v0);
    break;
  case div:
    __ pop_d(v1);
    __ fdivd(v0, v1, v0);
    break;
  case rem:
    __ fmovd(v1, v0);
    __ pop_d(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg()
{
  transition(itos, itos);
  __ negw(r0, r0);
}

void TemplateTable::lneg()
{
  transition(ltos, ltos);
  __ neg(r0, r0);
}

void TemplateTable::fneg()
{
  transition(ftos, ftos);
  __ fnegs(v0, v0);
}

void TemplateTable::dneg()
{
  transition(dtos, dtos);
  __ fnegd(v0, v0);
}

void TemplateTable::iinc()
{
  transition(vtos, vtos);
  __ load_signed_byte(r1, at_bcp(2)); // get constant
  locals_index(r2);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::wide_iinc()
{
  transition(vtos, vtos);
  // __ mov(r1, zr);
  __ ldrw(r1, at_bcp(2)); // get constant and index
  __ rev16(r1, r1);
  __ ubfx(r2, r1, 0, 16);
  __ neg(r2, r2);
  __ sbfx(r1, r1, 16, 16);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}
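// Float/double-to-integer conversions: fcvtzs already saturates
// out-of-range inputs and converts NaN to 0, but it raises FPSR exception
// flags in those cases. The templates below clear FPSR, convert, and
// re-check it; if any flag is set the conversion is redone in the runtime
// (SharedRuntime::f2i and friends) to guarantee exact Java semantics.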
void TemplateTable::convert()
{
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT
  // static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ sxtw(r0, r0);
    break;
  case Bytecodes::_i2f:
    __ scvtfws(v0, r0);
    break;
  case Bytecodes::_i2d:
    __ scvtfwd(v0, r0);
    break;
  case Bytecodes::_i2b:
    __ sxtbw(r0, r0);
    break;
  case Bytecodes::_i2c:
    __ uxthw(r0, r0);
    break;
  case Bytecodes::_i2s:
    __ sxthw(r0, r0);
    break;
  case Bytecodes::_l2i:
    __ uxtw(r0, r0);
    break;
  case Bytecodes::_l2f:
    __ scvtfs(v0, r0);
    break;
  case Bytecodes::_l2d:
    __ scvtfd(v0, r0);
    break;
  case Bytecodes::_f2i:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzsw(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
      __ bind(L_Okay);
    }
    break;
  case Bytecodes::_f2l:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzs(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
      __ bind(L_Okay);
    }
    break;
  case Bytecodes::_f2d:
    __ fcvts(v0, v0);
    break;
  case Bytecodes::_d2i:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzdw(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
      __ bind(L_Okay);
    }
    break;
  case Bytecodes::_d2l:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzd(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
      __ bind(L_Okay);
    }
    break;
  case Bytecodes::_d2f:
    __ fcvtd(v0, v0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp()
{
  transition(ltos, itos);
  Label done;
  __ pop_l(r1);
  __ cmp(r1, r0);
  __ mov(r0, (uint64_t)-1L);
  __ br(Assembler::LT, done);
  // __ mov(r0, 1UL);
  // __ csel(r0, r0, zr, Assembler::NE);
  // and here is a faster way
  __ csinc(r0, zr, zr, Assembler::EQ);
  __ bind(done);
}
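// The comparison templates produce -1/0/1 in r0. The csinc/csinv idiom
// computes the tail in one instruction: csinc(r0, zr, zr, EQ) yields 0 when
// the comparison was equal and zr + 1 == 1 otherwise, while csinv yields
// 0 or ~0 (i.e. -1).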
void TemplateTable::float_cmp(bool is_float, int unordered_result)
{
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(v1);
    __ fcmps(v1, v0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(v1);
    __ fcmpd(v1, v0);
  }
  if (unordered_result < 0) {
    // we want -1 for unordered or less than, 0 for equal and 1 for
    // greater than.
    __ mov(r0, (uint64_t)-1L);
    // for FP LT tests less than or unordered
    __ br(Assembler::LT, done);
    // install 0 for EQ otherwise 1
    __ csinc(r0, zr, zr, Assembler::EQ);
  } else {
    // we want -1 for less than, 0 for equal and 1 for unordered or
    // greater than.
    __ mov(r0, 1L);
    // for FP HI tests greater than or unordered
    __ br(Assembler::HI, done);
    // install 0 for EQ otherwise ~0
    __ csinv(r0, zr, zr, Assembler::EQ);

  }
  __ bind(done);
}
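// branch() is shared by the if* conditionals, goto, goto_w, jsr and jsr_w
// templates. The displacement is relative to the bcp of the branch bytecode
// itself. Backward branches bump the backedge counter and, when it
// overflows, may trigger OSR (on-stack replacement) compilation.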
void TemplateTable::branch(bool is_jsr, bool is_wide)
{
  __ profile_taken_branch(r0, r1);
  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // load branch displacement
  if (!is_wide) {
    __ ldrh(r2, at_bcp(1));
    __ rev16(r2, r2);
    // sign extend the 16 bit value in r2
    __ sbfm(r2, r2, 0, 15);
  } else {
    __ ldrw(r2, at_bcp(1));
    __ revw(r2, r2);
    // sign extend the 32 bit value in r2
    __ sbfm(r2, r2, 0, 31);
  }

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.

  if (is_jsr) {
    // Pre-load the next target bytecode into rscratch1
    __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
    // compute return address as bci
    __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
    __ add(rscratch2, rscratch2,
           in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
    __ sub(r1, rbcp, rscratch2);
    __ push_i(r1);
    // Adjust the bcp by the displacement in r2
    __ add(rbcp, rbcp, r2);
    __ dispatch_only(vtos, /*generate_poll*/true);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp by the displacement in r2
  __ add(rbcp, rbcp, r2);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // r0: MDO
    // w1: MDO bumped taken-count
    // r2: target offset
    __ cmp(r2, zr);
    __ br(Assembler::GT, dispatch); // count only if backward branch

    // ECN: FIXME: This code smells
    // check if MethodCounters exists
    Label has_counters;
    __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
    __ cbnz(rscratch1, has_counters);
    __ push(r0);
    __ push(r1);
    __ push(r2);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
            InterpreterRuntime::build_method_counters), rmethod);
    __ pop(r2);
    __ pop(r1);
    __ pop(r0);
    __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
    __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
    __ bind(has_counters);

    Label no_mdo;
    int increment = InvocationCounter::count_increment;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
      __ cbz(r1, no_mdo);
      // Increment the MDO backedge counter
      const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
                                             in_bytes(InvocationCounter::counter_offset()));
      const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
      __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                 r0, rscratch1, false, Assembler::EQ,
                                 UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
      __ b(dispatch);
    }
    __ bind(no_mdo);
    // Increment backedge counter in MethodCounters*
    __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
    const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
    __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
                               r0, rscratch2, false, Assembler::EQ,
                               UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rscratch1
  __ load_unsigned_byte(rscratch1, Address(rbcp, 0));

  // continue with the bytecode @ target
  // rscratch1: target bytecode
  // rbcp: target bcp
  __ dispatch_only(vtos, /*generate_poll*/true);

  if (UseLoopCounter && UseOnStackReplacement) {
    // invocation counter overflow
    __ bind(backedge_counter_overflow);
    __ neg(r2, r2);
    __ add(r2, r2, rbcp);     // branch bcp
    // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::frequency_counter_overflow),
               r2);
    __ load_unsigned_byte(r1, Address(rbcp, 0));  // restore target bytecode

    // r0: osr nmethod (osr ok) or null (osr not possible)
    // w1: target bytecode
    // r2: scratch
    __ cbz(r0, dispatch);     // test result -- no osr if null
    // nmethod may have been invalidated (VM may block upon call_VM return)
    __ ldrb(r2, Address(r0, nmethod::state_offset()));
    if (nmethod::in_use != 0)
      __ sub(r2, r2, nmethod::in_use);
    __ cbnz(r2, dispatch);

    // We have the address of an on stack replacement routine in r0
    // We need to prepare to execute the OSR method. First we must
    // migrate the locals and monitors off of the stack.

    __ mov(r19, r0);                             // save the nmethod

    call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

    // r0 is OSR buffer, move it to expected parameter location
    __ mov(j_rarg0, r0);

    // remove activation
    // get sender esp
    __ ldr(esp,
        Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
    // remove frame anchor
    __ leave();
    // Ensure compiled code always sees stack at proper alignment
    __ andr(sp, esp, -16);

    // and begin the OSR nmethod
    __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
    __ br(rscratch1);
  }
}
void TemplateTable::if_0cmp(Condition cc)
{
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  if (cc == equal)
    __ cbnzw(r0, not_taken);
  else if (cc == not_equal)
    __ cbzw(r0, not_taken);
  else {
    __ andsw(zr, r0, r0);
    __ br(j_not(cc), not_taken);
  }

  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

void TemplateTable::if_icmp(Condition cc)
{
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(r1);
  __ cmpw(r1, r0, Assembler::LSL);
  __ br(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

void TemplateTable::if_nullcmp(Condition cc)
{
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  if (cc == equal)
    __ cbnz(r0, not_taken);
  else
    __ cbz(r0, not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}
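// Under Valhalla, reference comparison of two inline-type ("value") objects
// cannot be decided by identity alone: if both operands are non-null inline
// types of the same class, if_acmp falls back to the runtime's
// is_substitutable() test, which compares the operands field by field.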

  __ profile_acmp(r2, r1, r0, r4);

  Register is_inline_type_mask = rscratch1;
  __ mov(is_inline_type_mask, markWord::inline_type_pattern);

  if (EnableValhalla) {
    __ cmp(r1, r0);
    __ br(Assembler::EQ, (cc == equal) ? taken : not_taken);

    // might be substitutable, test if either r0 or r1 is null
    __ andr(r2, r0, r1);
    __ cbz(r2, (cc == equal) ? not_taken : taken);

    // and both are values ?
    __ ldr(r2, Address(r1, oopDesc::mark_offset_in_bytes()));
    __ andr(r2, r2, is_inline_type_mask);
    __ ldr(r4, Address(r0, oopDesc::mark_offset_in_bytes()));
    __ andr(r4, r4, is_inline_type_mask);
    __ andr(r2, r2, r4);
    __ cmp(r2, is_inline_type_mask);
    __ br(Assembler::NE, (cc == equal) ? not_taken : taken);

    // same value klass ?
    __ load_metadata(r2, r1);
    __ load_metadata(r4, r0);
    __ cmp(r2, r4);
    __ br(Assembler::NE, (cc == equal) ? not_taken : taken);

    // Know both are the same type, let's test for substitutability...
    if (cc == equal) {
      invoke_is_substitutable(r0, r1, taken, not_taken);
    } else {
      invoke_is_substitutable(r0, r1, not_taken, taken);
    }
    __ stop("Not reachable");
  }

  __ cmpoop(r1, r0);
  __ br(j_not(cc), not_taken);
  __ bind(taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0, true);
}

void TemplateTable::invoke_is_substitutable(Register aobj, Register bobj,
                                            Label& is_subst, Label& not_subst) {

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::is_substitutable), aobj, bobj);
  // Restored... r0 answer, jmp to outcome...
  __ cbz(r0, not_subst);
  __ b(is_subst);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(r1);
  __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
  __ profile_ret(r1, r2);
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, r1));
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, /*generate_poll*/true);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(r1);
  __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
  __ profile_ret(r1, r2);
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, r1));
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, /*generate_poll*/true);
}


void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align rbcp
  __ lea(r1, at_bcp(BytesPerInt));
  __ andr(r1, r1, -BytesPerInt);
  // load lo & hi
  __ ldrw(r2, Address(r1, BytesPerInt));
  __ ldrw(r3, Address(r1, 2 * BytesPerInt));
  __ rev32(r2, r2);
  __ rev32(r3, r3);
  // check against lo & hi
  __ cmpw(r0, r2);
  __ br(Assembler::LT, default_case);
  __ cmpw(r0, r3);
  __ br(Assembler::GT, default_case);
  // lookup dispatch offset
  __ subw(r0, r0, r2);
  __ lea(r3, Address(r1, r0, Address::uxtw(2)));
  __ ldrw(r3, Address(r3, 3 * BytesPerInt));
  __ profile_switch_case(r0, r1, r2);
  // continue execution
  __ bind(continue_execution);
  __ rev32(r3, r3);
  __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
  __ add(rbcp, rbcp, r3, ext::sxtw);
  __ dispatch_only(vtos, /*generate_poll*/true);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(r0);
  __ ldrw(r3, Address(r1, 0));
  __ b(continue_execution);
}
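
// Operand layout reminder (added for clarity; restated from the JVM spec,
// not from the original comments): after the tableswitch opcode the bcp is
// padded to a 4-byte boundary, then come default, lo and hi as big-endian
// 32-bit words, followed by (hi - lo + 1) big-endian 32-bit branch offsets.
// That is why the code above aligns down to -BytesPerInt, reads lo/hi at
// offsets BytesPerInt and 2 * BytesPerInt, indexes the jump table from
// 3 * BytesPerInt, and uses rev32 to byte-swap each big-endian word on this
// little-endian target.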

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap r0 so we can avoid bswapping the table entries
  __ rev32(r0, r0);
  // align rbcp
  __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andr(r19, r19, -BytesPerInt);
  // set counter
  __ ldrw(r1, Address(r19, BytesPerInt));
  __ rev32(r1, r1);
  __ b(loop_entry);
  // table search
  __ bind(loop);
  __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
  __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
  __ cmpw(r0, rscratch1);
  __ br(Assembler::EQ, found);
  __ bind(loop_entry);
  __ subs(r1, r1, 1);
  __ br(Assembler::PL, loop);
  // default case
  __ profile_switch_default(r0);
  __ ldrw(r3, Address(r19, 0));
  __ b(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
  __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
  __ profile_switch_case(r1, r0, r19);
  // continue execution
  __ bind(continue_execution);
  __ rev32(r3, r3);
  __ add(rbcp, rbcp, r3, ext::sxtw);
  __ ldrb(rscratch1, Address(rbcp, 0));
  __ dispatch_only(vtos, /*generate_poll*/true);
}

void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }
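  //
  // Note (added for clarity; the layout is inferred from the code below and
  // the lookupswitch operand format): each LookupswitchPair is two big-endian
  // 32-bit words -- a match value followed by a branch offset -- so an entry
  // is 8 bytes. That is why the array is indexed with Address::lsl(3) and
  // every word read from it goes through rev32 before use.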

  // Register allocation
  const Register key   = r0; // already set (tosca)
  const Register array = r1;
  const Register i     = r2;
  const Register j     = r3;
  const Register h     = rscratch1;
  const Register temp  = rscratch2;

  // Find array start
  __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                          // get rid of this
                                          // instruction (change
                                          // offsets below)
  __ andr(array, array, -BytesPerInt);

  // Initialize i & j
  __ mov(i, 0);                             // i = 0;
  __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);

  // Convert j into native byteordering
  __ rev32(j, j);

  // And start
  Label entry;
  __ b(entry);

  // binary search loop
  {
    Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ addw(h, i, j);                       // h = i + j;
    __ lsrw(h, h, 1);                       // h = (i + j) >> 1;
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    // Convert array[h].match to native byte-ordering before compare
    __ ldr(temp, Address(array, h, Address::lsl(3)));
    __ rev32(temp, temp);
    __ cmpw(key, temp);
    // j = h if (key <  array[h].fast_match())
    __ csel(j, h, j, Assembler::LT);
    // i = h if (key >= array[h].fast_match())
    __ csel(i, h, i, Assembler::GE);
    // while (i+1 < j)
    __ bind(entry);
    __ addw(h, i, 1);          // i+1
    __ cmpw(h, j);             // i+1 < j
    __ br(Assembler::LT, loop);
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  // Convert array[i].match to native byte-ordering before compare
  __ ldr(temp, Address(array, i, Address::lsl(3)));
  __ rev32(temp, temp);
  __ cmpw(key, temp);
  __ br(Assembler::NE, default_case);

  // entry found -> j = offset
  __ add(j, array, i, ext::uxtx, 3);
  __ ldrw(j, Address(j, BytesPerInt));
  __ profile_switch_case(i, key, array);
  __ rev32(j, j);
  __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
  __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
  __ dispatch_only(vtos, /*generate_poll*/true);

  // default case -> j = default offset
  __ bind(default_case);
  __ profile_switch_default(i);
  __ ldrw(j, Address(array, -2 * BytesPerInt));
  __ rev32(j, j);
  __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
  __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
  __ dispatch_only(vtos, /*generate_poll*/true);
}


void TemplateTable::_return(TosState state)
{
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");

    __ ldr(c_rarg1, aaddress(0));
    __ load_klass(r3, c_rarg1);
    __ ldrb(r3, Address(r3, Klass::misc_flags_offset()));
    Label skip_register_finalizer;
    __ tbz(r3, exact_log2(KlassFlags::_misc_has_finalizer), skip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);

    __ bind(skip_register_finalizer);
  }

  // Issue a StoreStore barrier after all stores but before return
  // from any constructor for any class with a final field. We don't
  // know if this is a finalizer, so we always do so.
  if (_desc->bytecode() == Bytecodes::_return)
    __ membar(MacroAssembler::StoreStore);

  if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
    Label no_safepoint;
    __ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
    __ tbz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), no_safepoint);
    __ push(state);
    __ push_cont_fastpath(rthread);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
    __ pop_cont_fastpath(rthread);
    __ pop(state);
    __ bind(no_safepoint);
  }

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
  if (state == itos) {
    __ narrow(r0);
  }

  __ remove_activation(state);
  __ ret(lr);
}

// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPU's
// in order.  Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.  ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read.  It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write.  It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs).  Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads.  These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case.  This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
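//
// Concretely, the pattern used throughout this file is (summary added for
// clarity; it restates the membar calls emitted below rather than anything
// from the original comments):
//
//   volatile load:   <load>;  membar(LoadLoad | LoadStore)
//   volatile store:  membar(StoreStore | LoadStore);  <store>;
//                    membar(StoreLoad | StoreStore)
//
// plus, when compiled code may use LDAR/STLR (see the 8179954 comments
// below), a leading membar(AnyAny) before volatile loads.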

void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
                                                       Register Rcache,
                                                       Register index) {
  const Register temp = r19;
  assert_different_registers(Rcache, index, temp);
  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");

  Label resolved, clinit_barrier_slow;

  Bytecodes::Code code = bytecode();
  __ load_method_entry(Rcache, index);
  switch(byte_no) {
  case f1_byte:
    __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
    break;
  case f2_byte:
    __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
    break;
  }
  // Load-acquire the bytecode to match store-release in InterpreterRuntime
  __ ldarb(temp, temp);
  __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
  __ br(Assembler::EQ, resolved);

  // resolve first time through
  // Class initialization barrier slow path lands here as well.
  __ bind(clinit_barrier_slow);
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ mov(temp, (int) code);
  __ call_VM(noreg, entry, temp);

  // Update registers with resolved info
  __ load_method_entry(Rcache, index);
  // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
  // so all clients of this method must be modified accordingly
  __ bind(resolved);

  // Class initialization barrier for static methods
  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
    __ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
    __ load_method_holder(temp, temp);
    __ clinit_barrier(temp, rscratch1, nullptr, &clinit_barrier_slow);
  }
}

void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
                                                      Register Rcache,
                                                      Register index) {
  const Register temp = r19;
  assert_different_registers(Rcache, index, temp);

  Label resolved;

  Bytecodes::Code code = bytecode();
  switch (code) {
  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  default: break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ load_field_entry(Rcache, index);
  if (byte_no == f1_byte) {
    __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::get_code_offset())));
  } else {
    __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::put_code_offset())));
  }
  // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
  __ ldarb(temp, temp);
  __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
  __ br(Assembler::EQ, resolved);

  // resolve first time through
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ mov(temp, (int) code);
  __ call_VM(noreg, entry, temp);

  // Update registers with resolved info
  __ load_field_entry(Rcache, index);
  __ bind(resolved);
}

void TemplateTable::load_resolved_field_entry(Register obj,
                                              Register cache,
                                              Register tos_state,
                                              Register offset,
                                              Register flags,
                                              bool is_static = false) {
  assert_different_registers(cache, tos_state, flags, offset);

  // Field offset
  __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);

  // Flags
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())));

  // TOS state
  if (tos_state != noreg) {
    __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())));
  }

  // Klass overwrite register
  if (is_static) {
    __ ldr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()));
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ldr(obj, Address(obj, mirror_offset));
    __ resolve_oop_handle(obj, r5, rscratch2);
  }
}

void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
                                                                 Register method,
                                                                 Register flags) {

  // setup registers
  const Register index = flags;
  assert_different_registers(method, cache, flags);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f1_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
  __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}

void TemplateTable::load_resolved_method_entry_handle(Register cache,
                                                      Register method,
                                                      Register ref_index,
                                                      Register flags) {
  // setup registers
  const Register index = ref_index;
  assert_different_registers(method, flags);
  assert_different_registers(method, cache, index);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f1_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));

  // maybe push appendix to arguments (just before return address)
  Label L_no_push;
  __ tbz(flags, ResolvedMethodEntry::has_appendix_shift, L_no_push);
  // invokehandle uses an index into the resolved references array
  __ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
  // Push the appendix as a trailing parameter.
  // This must be done before we get the receiver,
  // since the parameter_size includes it.
  Register appendix = method;
  __ load_resolved_reference_at_index(appendix, ref_index);
  __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
  __ bind(L_no_push);

  __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}

void TemplateTable::load_resolved_method_entry_interface(Register cache,
                                                         Register klass,
                                                         Register method_or_table_index,
                                                         Register flags) {
  // setup registers
  const Register index = method_or_table_index;
  assert_different_registers(method_or_table_index, cache, flags);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f1_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));

  // Invokeinterface can behave in different ways:
  // If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
  // behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
  // vtable index is placed in the register.
  // Otherwise, the registers will be populated with the klass and method.
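  //
  // Sketched as a decision table (added for clarity; it restates the
  // branches emitted below):
  //
  //   is_forced_virtual && is_vfinal  -> method_or_table_index = Method*
  //   is_forced_virtual && !is_vfinal -> method_or_table_index = vtable index
  //   otherwise                       -> method_or_table_index = Method*,
  //                                      klass = interface Klass*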

  Label NotVirtual; Label NotVFinal; Label Done;
  __ tbz(flags, ResolvedMethodEntry::is_forced_virtual_shift, NotVirtual);
  __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
  __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
  __ b(Done);

  __ bind(NotVFinal);
  __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
  __ b(Done);

  __ bind(NotVirtual);
  __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
  __ ldr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
  __ bind(Done);
}

void TemplateTable::load_resolved_method_entry_virtual(Register cache,
                                                       Register method_or_table_index,
                                                       Register flags) {
  // setup registers
  const Register index = flags;
  assert_different_registers(method_or_table_index, cache, flags);

  // determine constant pool cache field offsets
  resolve_cache_and_index_for_method(f2_byte, cache, index);
  __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));

  // method_or_table_index can either be a vtable index or a method depending on the virtual final flag
  Label NotVFinal; Label Done;
  __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
  __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
  __ b(Done);

  __ bind(NotVFinal);
  __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
  __ bind(Done);
}

// The rmethod register is input and overwritten to be the adapter method for the
// indy call. Link Register (lr) is set to the return address for the adapter and
// an appendix may be pushed to the stack. Registers r0-r3 are clobbered.
void TemplateTable::load_invokedynamic_entry(Register method) {
  // setup registers
  const Register appendix = r0;
  const Register cache = r2;
  const Register index = r3;
  assert_different_registers(method, appendix, cache, index, rcpool);

  __ save_bcp();

  Label resolved;

  __ load_resolved_indy_entry(cache, index);
  // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
  __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
  __ ldar(method, method);

  // Compare the method to zero
  __ cbnz(method, resolved);

  Bytecodes::Code code = bytecode();

  // Call to the interpreter runtime to resolve invokedynamic
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ mov(method, code);  // this is essentially Bytecodes::_invokedynamic
  __ call_VM(noreg, entry, method);
  // Update registers with resolved info
  __ load_resolved_indy_entry(cache, index);
  // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
  __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
  __ ldar(method, method);

#ifdef ASSERT
  __ cbnz(method, resolved);
  __ stop("Should be resolved by now");
#endif // ASSERT
  __ bind(resolved);

  Label L_no_push;
  // Check if there is an appendix
  __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
  __ tbz(index, ResolvedIndyEntry::has_appendix_shift, L_no_push);

  // Get appendix
  __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
  // Push the appendix as a trailing parameter
  // since the parameter_size includes it.
  __ push(method);
  __ mov(method, index);
  __ load_resolved_reference_at_index(appendix, method);
  __ verify_oop(appendix);
  __ pop(method);
  __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
  __ bind(L_no_push);

  // compute return type
  __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
  // load return address
  // Return address is loaded into link register (lr) and not pushed to the stack
  // like x86
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    __ mov(rscratch1, table_addr);
    __ ldr(lr, Address(rscratch1, index, Address::lsl(3)));
  }
}

// The registers cache and index are expected to be set before call.
// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
                                            bool is_static, bool has_tos) {
  // do the JVMTI work here to avoid disturbing the register state below
  // We use c_rarg registers here because we want to use the register used in
  // the call to the VM
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, r0);
    __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ ldrw(r0, Address(rscratch1));
    __ cbzw(r0, L1);

    __ load_field_entry(c_rarg2, index);

    if (is_static) {
      __ mov(c_rarg1, zr);  // null object reference
    } else {
      __ ldr(c_rarg1, at_tos());  // get object pointer without popping it
      __ verify_oop(c_rarg1);
    }
    // c_rarg1: object pointer or null
    // c_rarg2: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                       InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
    __ load_field_entry(cache, index);
    __ bind(L1);
  }
}

void TemplateTable::pop_and_check_object(Register r)
{
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
{
  const Register cache        = r2;
  const Register obj          = r4;
  const Register klass        = r5;
  const Register inline_klass = r7;
  const Register field_index  = r23;
  const Register index        = r3;
  const Register tos_state    = r3;
  const Register off          = r19;
  const Register flags        = r6;
  const Register bc           = r4; // uses same reg as obj, so don't mix them

  resolve_cache_and_index_for_field(byte_no, cache, index);
  jvmti_post_field_access(cache, index, is_static, false);

  // Valhalla extras
  __ load_unsigned_short(field_index, Address(cache, in_bytes(ResolvedFieldEntry::field_index_offset())));
  __ ldr(klass, Address(cache, ResolvedFieldEntry::field_holder_offset()));

  load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);

  if (!is_static) {
    // obj is on the stack
    pop_and_check_object(obj);
  }

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
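  //
  // Illustrative Dekker example (added for clarity; not part of the original
  // comment). With volatile fields x and y, both initially 0:
  //
  //   Thread A: x = 1; r1 = y;        Thread B: y = 1; r2 = x;
  //
  // Sequential consistency forbids r1 == 0 && r2 == 0. If the stores are
  // STLRs but the loads are plain LDRs, each CPU may satisfy its LDR before
  // the other thread's STLR becomes visible, so both threads can read 0.
  // The leading membar(AnyAny) emitted below keeps the volatile load from
  // floating above any earlier STLR.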
  if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
    Label notVolatile;
    __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  const Address field(obj, off);

  Label Done, notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  assert(btos == 0, "change code, btos != 0");
  __ cbnz(tos_state, notByte);

  // Don't rewrite getstatic, only getfield
  if (is_static) rc = may_not_rewrite;

  // btos
  __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
  __ push(btos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notByte);
  __ cmp(tos_state, (u1)ztos);
  __ br(Assembler::NE, notBool);

  // ztos (same code as btos)
  __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
  __ push(ztos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    // use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notBool);
  __ cmp(tos_state, (u1)atos);
  __ br(Assembler::NE, notObj);
  // atos
  if (!EnableValhalla) {
    do_oop_load(_masm, field, r0, IN_HEAP);
    __ push(atos);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
    }
    __ b(Done);
  } else { // Valhalla
    if (is_static) {
      __ load_heap_oop(r0, field, rscratch1, rscratch2);
      Label is_null_free_inline_type, uninitialized;
      // Issue below if the static field has not been initialized yet
      __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_null_free_inline_type);
      // field is not a null free inline type
      __ push(atos);
      __ b(Done);
      // field is a null free inline type, must not return null even if uninitialized
      __ bind(is_null_free_inline_type);
      __ cbz(r0, uninitialized);
      __ push(atos);
      __ b(Done);
      __ bind(uninitialized);
      Label slow_case, finish;
      __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
      __ cmp(rscratch1, (u1)InstanceKlass::fully_initialized);
      __ br(Assembler::NE, slow_case);
      __ get_default_value_oop(klass, off /* temp */, r0);
      __ b(finish);
      __ bind(slow_case);
      __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_inline_type_field), obj, cache);
      __ bind(finish);
      __ verify_oop(r0);
      __ push(atos);
      __ b(Done);
    } else {
      Label is_flat, nonnull, is_inline_type, has_null_marker, rewrite_inline;
      __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_inline_type);
      __ test_field_has_null_marker(flags, noreg /*temp*/, has_null_marker);
      // Non-inline field case
      __ load_heap_oop(r0, field, rscratch1, rscratch2);
      __ push(atos);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
      }
      __ b(Done);
      __ bind(is_inline_type);
      __ test_field_is_flat(flags, noreg /* temp */, is_flat);
      // field is not flat
      __ load_heap_oop(r0, field, rscratch1, rscratch2);
      __ cbnz(r0, nonnull);
      __ get_inline_type_field_klass(klass, field_index, inline_klass);
      __ get_default_value_oop(inline_klass, klass /* temp */, r0);
      __ bind(nonnull);
      __ verify_oop(r0);
      __ push(atos);
      __ b(rewrite_inline);
      __ bind(is_flat);
      // field is flat
      __ mov(r0, obj);
      __ read_flat_field(cache, field_index, off, inline_klass /* temp */, r0);
      __ verify_oop(r0);
      __ push(atos);
      __ b(rewrite_inline);
      __ bind(has_null_marker);
      call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_nullable_flat_field), obj, cache);
      __ verify_oop(r0);
      __ push(atos);
      __ bind(rewrite_inline);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_vgetfield, bc, r1);
      }
      __ b(Done);
    }
  }

  __ bind(notObj);
  __ cmp(tos_state, (u1)itos);
  __ br(Assembler::NE, notInt);
  // itos
  __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
  __ push(itos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notInt);
  __ cmp(tos_state, (u1)ctos);
  __ br(Assembler::NE, notChar);
  // ctos
  __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
  __ push(ctos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notChar);
  __ cmp(tos_state, (u1)stos);
  __ br(Assembler::NE, notShort);
  // stos
  __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
  __ push(stos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notShort);
  __ cmp(tos_state, (u1)ltos);
  __ br(Assembler::NE, notLong);
  // ltos
  __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
  __ push(ltos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notLong);
  __ cmp(tos_state, (u1)ftos);
  __ br(Assembler::NE, notFloat);
  // ftos
  __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
  __ push(ftos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notFloat);
#ifdef ASSERT
  __ cmp(tos_state, (u1)dtos);
  __ br(Assembler::NE, notDouble);
#endif
  // dtos
  __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
  __ push(dtos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
  }
#ifdef ASSERT
  __ b(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);

  Label notVolatile;
  __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
  __ bind(notVolatile);
}


void TemplateTable::getfield(int byte_no)
{
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no)
{
  getfield_or_static(byte_no, true);
}

// The registers cache and index are expected to be set before call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
  transition(vtos, vtos);

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, r0);
    __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ ldrw(r0, Address(rscratch1));
    __ cbz(r0, L1);

    __ mov(c_rarg2, cache);

    if (is_static) {
      // Life is simple.  Null out the object pointer.
      __ mov(c_rarg1, zr);
    } else {
      // Life is harder.  The stack holds the value on top, followed by
      // the object.  We don't know the size of the value, though; it
      // could be one or two words depending on its type. As a result,
      // we must find the type to determine where the object is.
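      //
      // For example (added for clarity): for a putfield of an int the
      // expression stack is [value, objectref] and the object sits at
      // at_tos_p1(); for a long or double the value occupies two slots,
      // [value, value, objectref], and the object sits at at_tos_p2().
      // Hence the ltos/dtos check below.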
      __ load_unsigned_byte(c_rarg3, Address(c_rarg2, in_bytes(ResolvedFieldEntry::type_offset())));
      Label nope2, done, ok;
      __ ldr(c_rarg1, at_tos_p1());   // initially assume a one word jvalue
      __ cmpw(c_rarg3, ltos);
      __ br(Assembler::EQ, ok);
      __ cmpw(c_rarg3, dtos);
      __ br(Assembler::NE, nope2);
      __ bind(ok);
      __ ldr(c_rarg1, at_tos_p2());   // ltos (two word jvalue)
      __ bind(nope2);
    }
    // object (tos)
    __ mov(c_rarg3, esp);
    // c_rarg1: object pointer set up above (null if static)
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               c_rarg1, c_rarg2, c_rarg3);
    __ load_field_entry(cache, index);
    __ bind(L1);
  }
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  const Register cache        = r2;
  const Register index        = r3;
  const Register tos_state    = r3;
  const Register obj          = r2;
  const Register off          = r19;
  const Register flags        = r6;
  const Register bc           = r4;
  const Register inline_klass = r5;

  resolve_cache_and_index_for_field(byte_no, cache, index);
  jvmti_post_field_mod(cache, index, is_static);
  load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);

  Label Done;
  {
    Label notVolatile;
    __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }

  // field address
  const Address field(obj, off);

  Label notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  assert(btos == 0, "change code, btos != 0");
  __ cbnz(tos_state, notByte);

  // Don't rewrite putstatic, only putfield
  if (is_static) rc = may_not_rewrite;

  // btos
  {
    __ pop(btos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notByte);
  __ cmp(tos_state, (u1)ztos);
  __ br(Assembler::NE, notBool);

  // ztos
  {
    __ pop(ztos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notBool);
  __ cmp(tos_state, (u1)atos);
  __ br(Assembler::NE, notObj);

  // atos
  {
    if (!EnableValhalla) {
      __ pop(atos);
      if (!is_static) pop_and_check_object(obj);
      // Store into the field
      do_oop_store(_masm, field, r0, IN_HEAP);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
      }
      __ b(Done);
    } else { // Valhalla
      __ pop(atos);
      if (is_static) {
        Label is_inline_type;
        __ test_field_is_not_null_free_inline_type(flags, noreg /* temp */, is_inline_type);
        __ null_check(r0);
        __ bind(is_inline_type);
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ b(Done);
      } else {
        Label is_inline_type, is_flat, has_null_marker, rewrite_not_inline, rewrite_inline;
        __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_inline_type);
        __ test_field_has_null_marker(flags, noreg /*temp*/, has_null_marker);
        // Not an inline type
        pop_and_check_object(obj);
        // Store into the field
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ bind(rewrite_not_inline);
        if (rc == may_rewrite) {
          patch_bytecode(Bytecodes::_fast_aputfield, bc, r19, true, byte_no);
        }
        __ b(Done);
        // Implementation of the inline type semantic
        __ bind(is_inline_type);
        __ null_check(r0);
        __ test_field_is_flat(flags, noreg /*temp*/, is_flat);
        // field is not flat
        pop_and_check_object(obj);
        // Store into the field
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ b(rewrite_inline);
        __ bind(is_flat);
        // field is flat
        __ load_field_entry(cache, index); // reload field entry (cache) because it was erased by tos_state
        __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedFieldEntry::field_index_offset())));
        __ ldr(r2, Address(cache, in_bytes(ResolvedFieldEntry::field_holder_offset())));
        __ inline_layout_info(r2, index, r6);
        pop_and_check_object(obj);
        __ load_klass(inline_klass, r0);
        __ data_for_oop(r0, r0, inline_klass);
        __ add(obj, obj, off);
        // because we use InlineLayoutInfo, we need special value access code specialized for fields (arrays will need a different API)
        __ flat_field_copy(IN_HEAP, r0, obj, r6);
        __ b(rewrite_inline);
        __ bind(has_null_marker);
        assert_different_registers(r0, cache, r19);
        pop_and_check_object(r19);
        __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_nullable_flat_field), r19, r0, cache);
        __ bind(rewrite_inline);
        if (rc == may_rewrite) {
          patch_bytecode(Bytecodes::_fast_vputfield, bc, r19, true, byte_no);
        }
        __ b(Done);
      }
    } // Valhalla
  }

  __ bind(notObj);
  __ cmp(tos_state, (u1)itos);
  __ br(Assembler::NE, notInt);

  // itos
  {
    __ pop(itos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notInt);
  __ cmp(tos_state, (u1)ctos);
  __ br(Assembler::NE, notChar);

  // ctos
  {
    __ pop(ctos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notChar);
  __ cmp(tos_state, (u1)stos);
  __ br(Assembler::NE, notShort);

  // stos
  {
    __ pop(stos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notShort);
  __ cmp(tos_state, (u1)ltos);
  __ br(Assembler::NE, notLong);

  // ltos
  {
    __ pop(ltos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notLong);
  __ cmp(tos_state, (u1)ftos);
  __ br(Assembler::NE, notFloat);

  // ftos
  {
    __ pop(ftos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notFloat);
#ifdef ASSERT
  __ cmp(tos_state, (u1)dtos);
  __ br(Assembler::NE, notDouble);
#endif

  // dtos
  {
    __ pop(dtos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
    }
  }

#ifdef ASSERT
  __ b(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);

  {
    Label notVolatile;
    __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
    __ bind(notVolatile);
  }
}

void TemplateTable::putfield(int byte_no)
{
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::jvmti_post_fast_field_mod() {
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L2;
    __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ ldrw(c_rarg3, Address(rscratch1));
    __ cbzw(c_rarg3, L2);
    __ pop_ptr(r19);                  // copy the object pointer from tos
    __ verify_oop(r19);
    __ push_ptr(r19);                 // put the object pointer back on tos
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {             // load values into the jvalue object
    case Bytecodes::_fast_vputfield: //fall through
    case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(r0); break;
    case Bytecodes::_fast_dputfield: __ push_d(); break;
    case Bytecodes::_fast_fputfield: __ push_f(); break;
    case Bytecodes::_fast_lputfield: __ push_l(r0); break;

    default:
      ShouldNotReachHere();
    }
    __ mov(c_rarg3, esp);             // points to jvalue on the stack
    // access constant pool cache entry
    __ load_field_entry(c_rarg2, r0);
    __ verify_oop(r19);
    // r19: object pointer copied above
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               r19, c_rarg2, c_rarg3);

    switch (bytecode()) {             // restore tos values
    case Bytecodes::_fast_vputfield: //fall through
    case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
    case Bytecodes::_fast_dputfield: __ pop_d(); break;
    case Bytecodes::_fast_fputfield: __ pop_f(); break;
    case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
    default: break;
    }
    __ bind(L2);
  }
}

void TemplateTable::fast_storefield(TosState state)
{
  transition(state, vtos);

  ByteSize base = ConstantPoolCache::base_offset();

  jvmti_post_fast_field_mod();

  // access constant pool cache
  __ load_field_entry(r2, r1);

  // R1: field offset, R2: field holder, R3: flags
  load_resolved_field_entry(r2, r2, noreg, r1, r3);

  {
    Label notVolatile;
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }

  Label notVolatile;

  // Get object from stack
  pop_and_check_object(r2);

  // field address
  const Address field(r2, r1);

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_vputfield:
    {
      Label is_flat, has_null_marker, done;
      __ test_field_has_null_marker(r3, noreg /* temp */, has_null_marker);
      __ null_check(r0);
      __ test_field_is_flat(r3, noreg /* temp */, is_flat);
      // field is not flat
      do_oop_store(_masm, field, r0, IN_HEAP);
      __ b(done);
      __ bind(is_flat);
      // field is flat
      __ load_field_entry(r4, r3);
      __ load_unsigned_short(r3, Address(r4, in_bytes(ResolvedFieldEntry::field_index_offset())));
      __ ldr(r4, Address(r4, in_bytes(ResolvedFieldEntry::field_holder_offset())));
      __ inline_layout_info(r4, r3, r5);
      __ load_klass(r4, r0);
      __ data_for_oop(r0, r0, r4);
      __ lea(rscratch1, field);
      __ flat_field_copy(IN_HEAP, r0, rscratch1, r5);
      __ b(done);
      __ bind(has_null_marker);
      __ load_field_entry(r4, r1);
      __ mov(r1, r2);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_nullable_flat_field), r1, r0, r4);
      __ bind(done);
    }
    break;
  case Bytecodes::_fast_aputfield:
    do_oop_store(_masm, field, r0, IN_HEAP);
    break;
  case Bytecodes::_fast_lputfield:
    __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_iputfield:
    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_zputfield:
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_bputfield:
    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_sputfield:
    __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_cputfield:
    __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_fputfield:
    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
    break;
  case Bytecodes::_fast_dputfield:
    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }

  {
    Label notVolatile;
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
    __ bind(notVolatile);
  }
}


void TemplateTable::fast_accessfield(TosState state)
{
  transition(atos, state);
  // Do the JVMTI work here to avoid disturbing the register state below
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ ldrw(r2, Address(rscratch1));
    __ cbzw(r2, L1);
    // access constant pool cache entry
    __ load_field_entry(c_rarg2, rscratch2);
    __ verify_oop(r0);
    __ push_ptr(r0);  // save object pointer before call_VM() clobbers it
    __ mov(c_rarg1, r0);
    // c_rarg1: object pointer copied above
    // c_rarg2: cache entry pointer
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
    __ pop_ptr(r0);  // restore object pointer
    __ bind(L1);
  }

  // access constant pool cache
  __ load_field_entry(r2, r1);

  __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
  __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));

  // r0: object
  __ verify_oop(r0);
  __ null_check(r0);
  const Address field(r0, r1);

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
    Label notVolatile;
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_vgetfield:
    {
      Register index = r4, klass = r5, inline_klass = r6, tmp = r7;
      Label is_flat, has_null_marker, nonnull, Done;
      __ test_field_has_null_marker(r3, noreg /*temp*/, has_null_marker);
      __ test_field_is_flat(r3, noreg /* temp */, is_flat);
      // field is not flat
      __ load_heap_oop(r0, field, rscratch1, rscratch2);
      __ cbnz(r0, nonnull);
      __ load_unsigned_short(index, Address(r2, in_bytes(ResolvedFieldEntry::field_index_offset())));
      __ ldr(klass, Address(r2, in_bytes(ResolvedFieldEntry::field_holder_offset())));
      __ get_inline_type_field_klass(klass, index, inline_klass);
      __ get_default_value_oop(inline_klass, tmp /* temp */, r0);
      __ bind(nonnull);
      __ verify_oop(r0);
      __ b(Done);
      __ bind(is_flat);
      // field is flat
      __ load_unsigned_short(index, Address(r2, in_bytes(ResolvedFieldEntry::field_index_offset())));
      __ read_flat_field(r2, index, r1, tmp /* temp */, r0);
      __ verify_oop(r0);
      __ b(Done);
      __ bind(has_null_marker);
      call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_nullable_flat_field), r0, r2);
      __ verify_oop(r0);
      __ bind(Done);
    }
    break;
  case Bytecodes::_fast_agetfield:
    do_oop_load(_masm, field, r0, IN_HEAP);
    __ verify_oop(r0);
    break;
  case Bytecodes::_fast_lgetfield:
    __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_igetfield:
    __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_bgetfield:
    __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_sgetfield:
    __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_cgetfield:
    __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_fgetfield:
    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
    break;
  case Bytecodes::_fast_dgetfield:
    __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }
  {
    Label notVolatile;
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }
}

void TemplateTable::fast_xaccess(TosState state)
{
  transition(vtos, state);

  // get receiver
  __ ldr(r0, aaddress(0));
  // access constant pool cache
  __ load_field_entry(r2, r3, 2);
  __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
    Label notVolatile;
    __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  // make sure exception is reported in correct bcp range (getfield is
  // next instruction)
  __ increment(rbcp);
  __ null_check(r0);
  switch (state) {
  case itos:
    __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
    break;
  case atos:
    do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
    __ verify_oop(r0);
    break;
  case ftos:
    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }

  {
    Label notVolatile;
    __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
    __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
    __ bind(notVolatile);
  }

  __ decrement(rbcp);
}



//-----------------------------------------------------------------------------
// Calls

void TemplateTable::prepare_invoke(Register cache, Register recv) {

  Bytecodes::Code code = bytecode();
  const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);

  // save 'interpreter return address'
  __ save_bcp();

  // Load TOS state for later
  __ load_unsigned_byte(rscratch2, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));

  // load receiver if needed (note: no return address pushed yet)
  if (load_receiver) {
    __ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
    __ add(rscratch1, esp, recv, ext::uxtx, 3);
    __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
    __ verify_oop(recv);
  }

  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    __ mov(rscratch1, table_addr);
    __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
  }
}


void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags)
{
  // Uses temporary registers r0, r3
  assert_different_registers(index, recv, r0, r3);
  // Test for an invoke of a final method
  Label notFinal;
  __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, notFinal);

  const Register method = index;  // method must be rmethod
  assert(method == rmethod,
         "Method must be rmethod for interpreter calling convention");

  // do the call - the index is actually the method to call
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*

  // It's final, need a null check here!
  __ null_check(recv);

  // profile this call
  __ profile_final_call(r0);
  __ profile_arguments_type(r0, method, r4, true);

  __ jump_from_interpreted(method, r0);

  __ bind(notFinal);

  // get receiver klass
  __ load_klass(r0, recv);

  // profile this call
  __ profile_virtual_call(r0, rlocals, r3);

  // get target Method & entry point
  __ lookup_virtual_method(r0, index, method);
  __ profile_arguments_type(r3, method, r4, true);
  // FIXME -- this looks completely redundant. is it?
  // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
  __ jump_from_interpreted(method, r3);
}

void TemplateTable::invokevirtual(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  load_resolved_method_entry_virtual(r2,      // ResolvedMethodEntry*
                                     rmethod, // Method* or itable index
                                     r3);     // flags
  prepare_invoke(r2, r2); // recv

  // rmethod: index (actually a Method*)
  // r2: receiver
  // r3: flags

  invokevirtual_helper(rmethod, r2, r3);
}

void TemplateTable::invokespecial(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_special_or_static(r2,      // ResolvedMethodEntry*
                                               rmethod, // Method*
                                               r3);     // flags
  prepare_invoke(r2, r2); // get receiver also for null check
  __ verify_oop(r2);
  __ null_check(r2);
  // do the call
  __ profile_call(r0);
  __ profile_arguments_type(r0, rmethod, rbcp, false);
  __ jump_from_interpreted(rmethod, r0);
}

void TemplateTable::invokestatic(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_special_or_static(r2,      // ResolvedMethodEntry*
                                               rmethod, // Method*
                                               r3);     // flags
  prepare_invoke(r2, r2); // no receiver is loaded for invokestatic

  // do the call
  __ profile_call(r0);
  __ profile_arguments_type(r0, rmethod, r4, false);
  __ jump_from_interpreted(rmethod, r0);
}

void TemplateTable::fast_invokevfinal(int byte_no)
{
  __ call_Unimplemented();
}

void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_interface(r2,      // ResolvedMethodEntry*
                                       r0,      // Klass*
                                       rmethod, // Method* or itable/vtable index
                                       r3);     // flags
  prepare_invoke(r2, r2); // receiver

  // r0: interface klass (from f1)
  // rmethod: method (from f2)
  // r2: receiver
  // r3: flags

  // First check for Object case, then private interface method,
  // then regular interface method.

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See cpCache.cpp for details.
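  //
  // For example (illustrative Java, hypothetical types):
  //
  //   interface I { }           // inherits hashCode() etc. from Object
  //   I i = ...;
  //   int h = i.hashCode();     // javac emits invokeinterface, but the
  //                             // target is a virtual method of Object
  //
  // Such entries are flagged as forced-virtual during resolution and are
  // dispatched through invokevirtual_helper below.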
  Label notObjectMethod;
  __ tbz(r3, ResolvedMethodEntry::is_forced_virtual_shift, notObjectMethod);

  invokevirtual_helper(rmethod, r2, r3);
  __ bind(notObjectMethod);

  Label no_such_interface;

  // Check for private method invocation - indicated by vfinal
  Label notVFinal;
  __ tbz(r3, ResolvedMethodEntry::is_vfinal_shift, notVFinal);

  // Get receiver klass into r3
  __ load_klass(r3, r2);

  Label subtype;
  __ check_klass_subtype(r3, r0, r4, subtype);
  // If we get here the typecheck failed
  __ b(no_such_interface);
  __ bind(subtype);

  __ profile_final_call(r0);
  __ profile_arguments_type(r0, rmethod, r4, true);
  __ jump_from_interpreted(rmethod, r0);

  __ bind(notVFinal);

  // Get receiver klass into r3
  __ restore_locals();
  __ load_klass(r3, r2);

  Label no_such_method;

  // Preserve method for throw_AbstractMethodErrorVerbose.
  __ mov(r16, rmethod);
  // Receiver subtype check against REFC.
  // Superklass in r0. Subklass in r3. Blows rscratch2, r13
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             r3, r0, noreg,
                             // outputs: scan temp. reg, scan temp. reg
                             rscratch2, r13,
                             no_such_interface,
                             /*return_method=*/false);

  // profile this call
  __ profile_virtual_call(r3, r13, r19);

  // Get declaring interface class from method, and itable index

  __ load_method_holder(r0, rmethod);
  __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
  __ subw(rmethod, rmethod, Method::itable_index_max);
  __ negw(rmethod, rmethod);

  // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
  __ mov(rlocals, r3);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rlocals, r0, rmethod,
                             // outputs: method, scan temp. reg
                             rmethod, r13,
                             no_such_interface);

  // rmethod: Method to call
  // r2: receiver
  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
  // interpreter entry point and a conditional jump to it in case of a null
  // method.
  __ cbz(rmethod, no_such_method);

  __ profile_arguments_type(r3, rmethod, r13, true);

  // do the call
  // r2: receiver
  // rmethod: Method
  __ jump_from_interpreted(rmethod, r3);
  __ should_not_reach_here();

  // exception handling code follows...
  // note: must restore interpreter registers to canonical
  // state for exception handling to work correctly!

  __ bind(no_such_method);
  // throw exception
  __ restore_bcp();    // bcp must be correct for exception handler (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);
  // throw exception
  __ restore_bcp();    // bcp must be correct for exception handler (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
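  // (Here r3 holds the receiver klass and r0 the interface klass, so the
  //  runtime can name both classes in the IncompatibleClassChangeError
  //  message.)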
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
  return;
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_handle(r2,      // ResolvedMethodEntry*
                                    rmethod, // Method*
                                    r0,      // Resolved reference
                                    r3);     // flags
  prepare_invoke(r2, r2);

  __ verify_method_ptr(r2);
  __ verify_oop(r2);
  __ null_check(r2);

  // FIXME: profile the LambdaForm also

  // r13 is safe to use here as a scratch reg because it is about to
  // be clobbered by jump_from_interpreted().
  __ profile_final_call(r13);
  __ profile_arguments_type(r13, rmethod, r4, true);

  __ jump_from_interpreted(rmethod, r0);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_invokedynamic_entry(rmethod);

  // r0: CallSite object (from cpool->resolved_references[])
  // rmethod: MH.linkToCallSite method

  // Note: r0_callsite is already pushed

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(rbcp);
  __ profile_arguments_type(r3, rmethod, r13, false);

  __ verify_oop(r0);

  __ jump_from_interpreted(rmethod, r0);
}


//-----------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);

  __ get_unsigned_2_byte_index_at_bcp(r3, 1);
  Label slow_case;
  Label done;
  Label initialize_header;

  __ get_cpool_and_tags(r4, r0);
  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the order
  // in which the Constant Pool is updated (see ConstantPool::klass_at_put)
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
  __ lea(rscratch1, Address(rscratch1, tags_offset));
  __ ldarb(rscratch1, rscratch1);
  __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, slow_case);

  // get InstanceKlass
  __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);

  // make sure klass is initialized
  assert(VM_Version::supports_fast_class_init_checks(), "Optimization requires support for fast class initialization checks");
  __ clinit_barrier(r4, rscratch1, nullptr /*L_fast_path*/, &slow_case);

  __ allocate_instance(r4, r0, r3, r1, true, slow_case);
  if (DTraceAllocProbes) {
    // Trigger dtrace event for fastpath
    __ push(atos); // save the return value
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), r0);
    __ pop(atos); // restore the return value
  }
  __ b(done);

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(c_rarg1);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
  __ verify_oop(r0);

  // continue
  __ bind(done);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::newarray() {
  transition(itos, atos);
  __ load_unsigned_byte(c_rarg1, at_bcp(1));
  __ mov(c_rarg2, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          c_rarg1, c_rarg2);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  __ mov(c_rarg3, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          c_rarg1, c_rarg2, c_rarg3);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
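  //
  // An illustrative sketch of the hazard (hypothetical code, not from this
  // file): without StoreStore ordering,
  //
  //   T1: arr = new int[8];     // length/header stores, then reference store
  //   T2: int[] r = arr;        // may observe the reference ...
  //       int n = r.length;     // ... before the length store is visible
  //
  // The barrier orders the initializing stores before any store that
  // publishes the new object.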
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);
  __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
}

void TemplateTable::checkcast()
{
  transition(atos, atos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quicked
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(r0, rthread);
  __ pop(r3); // restore receiver
  __ b(resolved);

  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass

  __ bind(resolved);
  __ load_klass(r19, r3);

  // Generate subtype check. Blows r2, r5. Object in r3.
  // Superklass in r0. Subklass in r19.
  __ gen_subtype_check(r19, ok_is_subtype);

  // Come here on failure
  __ push(r3);
  // object is at TOS
  __ b(Interpreter::_throw_ClassCastException_entry);

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, r3); // Restore object from r3

  __ b(done);
  __ bind(is_null);

  // Collect counts on whether this test sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ profile_null_seen(r2);
  }

  __ bind(done);
}

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quicked
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(r0, rthread);
  __ pop(r3); // restore receiver
  __ verify_oop(r3);
  __ load_klass(r3, r3);
  __ b(resolved);

  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ load_klass(r3, r0);
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);

  __ bind(resolved);

  // Generate subtype check. Blows r2, r5
  // Superklass in r0. Subklass in r3.
  __ gen_subtype_check(r3, ok_is_subtype);

  // Come here on failure
  __ mov(r0, 0);
  __ b(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, 1);

  // Collect counts on whether this test sees nulls a lot or not.
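  // (A null receiver records a null_seen bit in the MethodData; the JIT
  //  compilers can then keep an explicit null path for this site instead
  //  of speculating that null never occurs here.)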
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(r2);
  } else {
    __ bind(is_null); // same as 'done'
  }
  __ bind(done);
  // r0 = 0: obj == nullptr or obj is not an instanceof the specified klass
  // r0 = 1: obj != nullptr and obj is an instanceof the specified klass
}

//-----------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  // get the unpatched byte code
  __ get_method(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::get_original_bytecode_at),
             c_rarg1, rbcp);
  __ mov(r19, r0);

  // post the breakpoint event
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             rmethod, rbcp);

  // complete the execution of original bytecode
  __ mov(rscratch1, r19);
  __ dispatch_only_normal(vtos);
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(r0);
  __ b(Interpreter::throw_exception_entry());
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
// in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- esp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rfp    ] <--- rfp
void TemplateTable::monitorenter()
{
  transition(atos, vtos);

  // check for null object
  __ null_check(r0);

  Label is_inline_type;
  __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
  __ test_markword_is_inline_type(rscratch1, is_inline_type);

  const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  Label allocated;

  // initialize entry pointer
  __ mov(c_rarg1, zr); // points to free slot or null

  // find a free slot in the monitor block (result in c_rarg1)
  {
    Label entry, loop, exit;
    __ ldr(c_rarg3, monitor_block_top); // derelativize pointer
    __ lea(c_rarg3, Address(rfp, c_rarg3, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg3 points to current entry, starting with top-most entry

    __ lea(c_rarg2, monitor_block_bot); // points to word before bottom

    __ b(entry);

    __ bind(loop);
    // check if current entry is used
    // if not used then remember entry in c_rarg1
    __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset()));
    __ cmp(zr, rscratch1);
    __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
    // check if current entry is for same object
    __ cmp(r0, rscratch1);
    // if same object then stop searching
    __ br(Assembler::EQ, exit);
    // otherwise advance to next entry
    __ add(c_rarg3, c_rarg3, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmp(c_rarg3, c_rarg2);
    // if not at bottom then check this entry
    __ br(Assembler::NE, loop);
    __ bind(exit);
  }

  __ cbnz(c_rarg1, allocated); // check if a slot has been found and
                               // if found, continue with that one

  // allocate one if there's no free slot
  {
    Label entry, loop;
    // 1. compute new pointers            // rsp: old expression stack top

    __ check_extended_sp();
    __ sub(sp, sp, entry_size);           // make room for the monitor
    __ sub(rscratch1, sp, rfp);
    __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
    __ str(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));

    __ ldr(c_rarg1, monitor_block_bot);   // derelativize pointer
    __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to the old expression stack bottom

    __ sub(esp, esp, entry_size);         // move expression stack top
    __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
    __ mov(c_rarg3, esp);                 // set start value for copy loop
    __ sub(rscratch1, c_rarg1, rfp);      // relativize pointer
    __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
    __ str(rscratch1, monitor_block_bot); // set new monitor block bottom

    __ b(entry);
    // 2. move expression stack contents
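    // (The new monitor entry is carved out between the frame data and the
    //  expression stack, so every live expression stack word must move down
    //  by entry_size bytes: the loop copies each word from its old location
    //  to entry_size bytes below, walking from the new stack top to the new
    //  stack bottom.)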
    __ bind(loop);
    __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
                                                   // word from old location
    __ str(c_rarg2, Address(c_rarg3, 0));          // and store it at new location
    __ add(c_rarg3, c_rarg3, wordSize);            // advance to next word
    __ bind(entry);
    __ cmp(c_rarg3, c_rarg1);                      // check if bottom reached
    __ br(Assembler::NE, loop);                    // if not at bottom then
                                                   // copy next word
  }

  // call run-time routine
  // c_rarg1: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(rbcp);

  // store object
  __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset()));
  __ lock_object(c_rarg1);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp(); // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);

  __ bind(is_inline_type);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                    InterpreterRuntime::throw_identity_exception), r0);
  __ should_not_reach_here();
}


void TemplateTable::monitorexit()
{
  transition(atos, vtos);

  // check for null object
  __ null_check(r0);

  const int is_inline_type_mask = markWord::inline_type_pattern;
  Label has_identity;
  __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
  __ mov(rscratch2, is_inline_type_mask);
  __ andr(rscratch1, rscratch1, rscratch2);
  __ cmp(rscratch1, rscratch2);
  __ br(Assembler::NE, has_identity);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                    InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();
  __ bind(has_identity);

  const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  Label found;

  // find matching slot
  {
    Label entry, loop;
    __ ldr(c_rarg1, monitor_block_top); // derelativize pointer
    __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to current entry, starting with top-most entry

    __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
                                        // of monitor block
    __ b(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset()));
    __ cmp(r0, rscratch1);
    // if same object then stop searching
    __ br(Assembler::EQ, found);
    // otherwise advance to next entry
    __ add(c_rarg1, c_rarg1, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmp(c_rarg1, c_rarg2);
    // if not at bottom then check this entry
    __ br(Assembler::NE, loop);
  }

  // error handling. Unlocking was not block-structured
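  // (Control falls through to here when the object on top of stack has no
  //  matching entry in this frame's monitor block; the runtime throws
  //  IllegalMonitorStateException.)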
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                    InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  __ bind(found);
  __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
  __ unlock_object(c_rarg1);
  __ pop_ptr(r0);  // discard object
}


// Wide instructions
void TemplateTable::wide()
{
  __ load_unsigned_byte(r19, at_bcp(1));
  __ mov(rscratch1, (address)Interpreter::_wentry_point);
  __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
  __ br(rscratch1);
}


// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  //   first_addr = last_addr + (ndims - 1) * wordSize
  __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
  __ sub(c_rarg1, c_rarg1, wordSize);
  call_VM(r0,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
          c_rarg1);
  __ load_unsigned_byte(r1, at_bcp(3));
  __ lea(esp, Address(esp, r1, Address::uxtw(3)));
}