/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/powerOfTwo.hpp"

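// The `__` shorthand routes every emitted instruction through
// Disassembler::hook, which records the C++ file and line that generated
// it so that disassembly output can be annotated with its source location.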
#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::lsl(3));
}

static inline Address laddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
  return Address(scratch, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  return laddress(r, scratch, _masm);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(esp, 0);
}

// At the top of the Java expression stack, which may be different from
// esp().  (It isn't for category 1 objects.)
static inline Address at_tos   () {
  return Address(esp,  Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(3));
}

static inline Address at_tos_p4() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(4));
}

static inline Address at_tos_p5() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(5));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::NE;
  case TemplateTable::not_equal    : return Assembler::EQ;
  case TemplateTable::less         : return Assembler::GE;
  case TemplateTable::less_equal   : return Assembler::GT;
  case TemplateTable::greater      : return Assembler::LE;
  case TemplateTable::greater_equal: return Assembler::LT;
  }
  ShouldNotReachHere();
  return Assembler::EQ;
}


// Miscellaneous helper routines
// Store an oop (or null) at the Address described by dst.
// If val == noreg this means store a null.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators) {
  assert(val == noreg || val == r0, "parameter is just for looks");
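  // r10, r11 and r3 are handed to store_heap_oop as temporaries for the
  // GC barrier code.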
  __ store_heap_oop(dst, val, r10, r11, r3, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators) {
  __ load_heap_oop(dst, src, r10, r11, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no)
{
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ load_field_entry(temp_reg, bc_reg);
      if (byte_no == f1_byte) {
        __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset())));
      } else {
        __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset())));
      }
      // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
      __ ldarb(temp_reg, temp_reg);
      __ movw(bc_reg, bc);
      __ cbzw(temp_reg, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movw(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ load_unsigned_byte(temp_reg, at_bcp(0));
    __ cmpw(temp_reg, Bytecodes::_breakpoint);
    __ br(Assembler::NE, L_fast_patch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
  __ br(Assembler::EQ, L_okay);
  __ cmpw(temp_reg, bc_reg);
  __ br(Assembler::EQ, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ strb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null()
{
  transition(vtos, atos);
  __ mov(r0, 0);
}

void TemplateTable::iconst(int value)
{
  transition(vtos, itos);
  __ mov(r0, value);
}

void TemplateTable::lconst(int value)
{
  transition(vtos, ltos);
  __ mov(r0, value);
}

void TemplateTable::fconst(int value)
{
  transition(vtos, ftos);
  switch (value) {
  case 0:
    __ fmovs(v0, 0.0);
    break;
  case 1:
    __ fmovs(v0, 1.0);
    break;
  case 2:
    __ fmovs(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value)
{
  transition(vtos, dtos);
  switch (value) {
  case 0:
    __ fmovd(v0, 0.0);
    break;
  case 1:
    __ fmovd(v0, 1.0);
    break;
  case 2:
    __ fmovd(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush()
{
  transition(vtos, itos);
  __ load_signed_byte32(r0, at_bcp(1));
}

void TemplateTable::sipush()
{
  transition(vtos, itos);
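  // The 2-byte immediate is big-endian: byte-swap it into the top half of
  // the word, then arithmetic-shift it back down to sign extend.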
  __ load_unsigned_short(r0, at_bcp(1));
  __ revw(r0, r0);
  __ asrw(r0, r0, 16);
}

void TemplateTable::ldc(LdcType type)
{
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (is_ldc_wide(type)) {
    __ get_unsigned_2_byte_index_at_bcp(r1, 1);
  } else {
    __ load_unsigned_byte(r1, at_bcp(1));
  }
  __ get_cpool_and_tags(r2, r0);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ add(r3, r1, tags_offset);
  __ lea(r3, Address(r0, r3));
  __ ldarb(r3, r3);

  // unresolved class - get the resolved class
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
  __ br(Assembler::EQ, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
  __ br(Assembler::EQ, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmp(r3, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, notClass);

  __ bind(call_ldc);
  __ mov(c_rarg1, is_ldc_wide(type) ? 1 : 0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(r0);
  __ verify_oop(r0);
  __ b(Done);

  __ bind(notClass);
  __ cmp(r3, (u1)JVM_CONSTANT_Float);
  __ br(Assembler::NE, notFloat);
  // ftos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrs(v0, Address(r1, base_offset));
  __ push_f();
  __ b(Done);

  __ bind(notFloat);

  __ cmp(r3, (u1)JVM_CONSTANT_Integer);
  __ br(Assembler::NE, notInt);

  // itos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrw(r0, Address(r1, base_offset));
  __ push_i(r0);
  __ b(Done);

  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(LdcType type)
{
  transition(vtos, atos);

  Register result = r0;
  Register tmp = r1;
  Register rarg = r2;

  int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ cbnz(result, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ mov(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);

  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;

    // Stash null_sentinel address to get its value later
    __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
    __ ldr(tmp, Address(rarg));
    __ resolve_oop_handle(tmp, r5, rscratch2);
    __ cmpoop(result, tmp);
    __ br(Assembler::NE, notNull);
    __ mov(result, 0);  // null object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    // Safe to call with 0 result
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w()
{
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(r0, 1);

  __ get_cpool_and_tags(r1, r2);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ lea(r2, Address(r2, r0, Address::lsl(0)));
  __ load_unsigned_byte(r2, Address(r2, tags_offset));
  __ cmpw(r2, (int)JVM_CONSTANT_Double);
  __ br(Assembler::NE, notDouble);

  // dtos
  __ lea(r2, Address(r1, r0, Address::lsl(3)));
  __ ldrd(v0, Address(r2, base_offset));
  __ push_d();
  __ b(Done);

  __ bind(notDouble);
  __ cmpw(r2, (int)JVM_CONSTANT_Long);
  __ br(Assembler::NE, notLong);

  // ltos
  __ lea(r0, Address(r1, r0, Address::lsl(3)));
  __ ldr(r0, Address(r0, base_offset));
  __ push_l();
  __ b(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done)
{
  Register obj = r0;
  Register rarg = r1;
  Register flags = r2;
  Register off = r3;

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  __ mov(rarg, (int) bytecode());
  __ call_VM(obj, entry, rarg);

  __ get_vm_result_2(flags, rthread);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ mov(off, flags);
  __ andw(off, off, ConstantPoolCache::field_index_mask);

  const Address field(obj, off);

  // What sort of thing are we loading?
  // x86 uses a shift and mask, or wings it with a shift plus an assert
  // that the mask is not needed; aarch64 just uses a bitfield extract.
  __ ubfxw(flags, flags, ConstantPoolCache::tos_state_shift,
           ConstantPoolCache::tos_state_bits);

  switch (bytecode()) {
    case Bytecodes::_ldc:
    case Bytecodes::_ldc_w:
      {
        // tos in (itos, ftos, stos, btos, ctos, ztos)
        Label notInt, notFloat, notShort, notByte, notChar, notBool;
        __ cmpw(flags, itos);
        __ br(Assembler::NE, notInt);
        // itos
        __ ldrw(r0, field);
        __ push(itos);
        __ b(Done);

        __ bind(notInt);
        __ cmpw(flags, ftos);
        __ br(Assembler::NE, notFloat);
        // ftos
        __ load_float(field);
        __ push(ftos);
        __ b(Done);

        __ bind(notFloat);
        __ cmpw(flags, stos);
        __ br(Assembler::NE, notShort);
        // stos
        __ load_signed_short(r0, field);
        __ push(stos);
        __ b(Done);

        __ bind(notShort);
        __ cmpw(flags, btos);
        __ br(Assembler::NE, notByte);
        // btos
        __ load_signed_byte(r0, field);
        __ push(btos);
        __ b(Done);

        __ bind(notByte);
        __ cmpw(flags, ctos);
        __ br(Assembler::NE, notChar);
        // ctos
        __ load_unsigned_short(r0, field);
        __ push(ctos);
        __ b(Done);

        __ bind(notChar);
        __ cmpw(flags, ztos);
        __ br(Assembler::NE, notBool);
        // ztos
        __ load_signed_byte(r0, field);
        __ push(ztos);
        __ b(Done);

        __ bind(notBool);
        break;
      }

    case Bytecodes::_ldc2_w:
      {
        Label notLong, notDouble;
        __ cmpw(flags, ltos);
        __ br(Assembler::NE, notLong);
        // ltos
        __ ldr(r0, field);
        __ push(ltos);
        __ b(Done);

        __ bind(notLong);
        __ cmpw(flags, dtos);
        __ br(Assembler::NE, notDouble);
        // dtos
        __ load_double(field);
        __ push(dtos);
        __ b(Done);

        __ bind(notDouble);
        break;
      }

    default:
      ShouldNotReachHere();
    }

    __ stop("bad ldc/condy");
}

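// Locals live at decreasing addresses below rlocals, so the unsigned local
// index is negated here and then scaled by the word size (lsl 3) in
// iaddress(Register) above.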
void TemplateTable::locals_index(Register reg, int offset)
{
  __ ldrb(reg, at_bcp(offset));
  __ neg(reg, reg);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
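    // Example: in "iload a; iload b" the second iload is quickened to
    // _fast_iload first (its successor is not an iload or caload); on a
    // later execution the first iload then sees _fast_iload ahead of it
    // and becomes _fast_iload2.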
    __ cmpw(r1, Bytecodes::_iload);
    __ br(Assembler::EQ, done);

    // if _fast_iload rewrite to _fast_iload2
    __ cmpw(r1, Bytecodes::_fast_iload);
    __ movw(bc, Bytecodes::_fast_iload2);
    __ br(Assembler::EQ, rewrite);

    // if _caload rewrite to _fast_icaload
    __ cmpw(r1, Bytecodes::_caload);
    __ movw(bc, Bytecodes::_fast_icaload);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_iload
    __ movw(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, r1, false);
    __ bind(done);

  }

  // do iload, get the local value into tos
  locals_index(r1);
  __ ldr(r0, iaddress(r1));

}

void TemplateTable::fast_iload2()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
  __ push(itos);
  locals_index(r1, 3);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::lload()
{
  transition(vtos, ltos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::fload()
{
  transition(vtos, ftos);
  locals_index(r1);
  // n.b. we use ldrd here because this is a 64 bit slot;
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::dload()
{
  transition(vtos, dtos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::aload()
{
  transition(vtos, atos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ ldrh(reg, at_bcp(2));
  __ rev16w(reg, reg);
  __ neg(reg, reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::wide_lload()
{
  transition(vtos, ltos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_fload()
{
  transition(vtos, ftos);
  locals_index_wide(r1);
  // n.b. we use ldrd here because this is a 64 bit slot;
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::wide_dload()
{
  transition(vtos, dtos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_aload()
{
  transition(vtos, atos);
  locals_index_wide(r1);
  __ ldr(r0, aaddress(r1));
}

void TemplateTable::index_check(Register array, Register index)
{
  // destroys r1, rscratch1
  // sign extend index for use by indexed load
  // __ movl2ptr(index, index);
  // check index
  Register length = rscratch1;
  __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ cmpw(index, length);
  if (index != r1) {
    // ??? convention: move aberrant index into r1 for exception message
    assert(r1 != array, "different registers");
    __ mov(r1, index);
  }
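  // Unsigned comparison: a negative index appears as a very large unsigned
  // value, so the LO branch below rejects it along with index >= length.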
  Label ok;
  __ br(Assembler::LO, ok);
  // ??? convention: move array into r3 for exception message
  __ mov(r3, array);
  __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  __ br(rscratch1);
  __ bind(ok);
}

void TemplateTable::iaload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::laload()
{
  transition(itos, ltos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::faload()
{
  transition(itos, ftos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::daload()
{
  transition(itos, dtos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::aaload()
{
  transition(itos, atos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  do_oop_load(_masm,
              Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
              r0,
              IS_ARRAY);
}

void TemplateTable::baload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}

void TemplateTable::caload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload()
{
  transition(vtos, itos);
  // load index out of locals
  locals_index(r2);
  __ ldr(r1, iaddress(r2));

  __ pop_ptr(r0);

  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::saload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::iload(int n)
{
  transition(vtos, itos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::lload(int n)
{
  transition(vtos, ltos);
  __ ldr(r0, laddress(n));
}

void TemplateTable::fload(int n)
{
  transition(vtos, ftos);
  __ ldrs(v0, faddress(n));
}

void TemplateTable::dload(int n)
{
  transition(vtos, dtos);
  __ ldrd(v0, daddress(n));
}

void TemplateTable::aload(int n)
{
  transition(vtos, atos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These pairs need only a small amount of code, so they are the most
  // profitable to rewrite.
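  //
  // For example, once the getfield following an aload_0 has itself been
  // quickened to _fast_agetfield, the aload_0 is rewritten to
  // _fast_aaccess_0, which performs both the load of local 0 and the
  // field fetch in a single template.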
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpw(r1, Bytecodes::_getfield);
    __ br(Assembler::EQ, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_igetfield);
    __ movw(bc, Bytecodes::_fast_iaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_agetfield);
    __ movw(bc, Bytecodes::_fast_aaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_fgetfield);
    __ movw(bc, Bytecodes::_fast_faccess_0);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movw(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, r1, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore()
{
  transition(itos, vtos);
  locals_index(r1);
  // FIXME: We're being very pernickety here storing a jint in a
  // local with strw, which costs an extra instruction over what we'd
  // be able to do with a simple str.  We should just store the whole
  // word.
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::lstore()
{
  transition(ltos, vtos);
  locals_index(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::astore()
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(r1);
  __ lea(rscratch1, faddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index_wide(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg, noreg);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1:  index
  // r3:  array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1:  index
  // r3:  array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ ldr(r0, at_tos());    // value
  __ ldr(r2, at_tos_p1()); // index
  __ ldr(r3, at_tos_p2()); // array

  Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));

  index_check(r3, r2);     // kills r1
  __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);

  // do array store check - check for null value first
  __ cbz(r0, is_null);

  // Move subklass into r1
  __ load_klass(r1, r0);
  // Move superklass into r0
  __ load_klass(r0, r3);
  __ ldr(r0, Address(r0,
                     ObjArrayKlass::element_klass_offset()));
  // element_address (r3 + r4 << LogBytesPerHeapOop) now denotes
  // array + header + index*oopSize.

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r1.
  __ gen_subtype_check(r1, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ ldr(r0, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, r0, IS_ARRAY);
  __ b(done);

  // Have a null in r0, r3=array, r2=index.  Store null at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(r2);

  // Store a null
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);

  // Pop stack arguments
  __ bind(done);
  __ add(esp, esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
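  // A T_BOOLEAN array differs from a T_BYTE array only in the boolean
  // diffbit of its layout helper; when that bit is set, the value being
  // stored must be normalized to 0 or 1.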
  __ load_klass(r2, r3);
  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
  int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
  Label L_skip;
  __ tbz(r2, diffbit_index, L_skip);
  __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg, noreg);
}

void TemplateTable::castore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg, noreg);
}

void TemplateTable::sastore()
{
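  // A short store is identical to a char store: both write the low 16 bits
  // of the value; signedness only matters on loads.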
  castore();
}

void TemplateTable::istore(int n)
{
  transition(itos, vtos);
  __ str(r0, iaddress(n));
}

void TemplateTable::lstore(int n)
{
  transition(ltos, vtos);
  __ str(r0, laddress(n));
}

void TemplateTable::fstore(int n)
{
  transition(ftos, vtos);
  __ strs(v0, faddress(n));
}

void TemplateTable::dstore(int n)
{
  transition(dtos, vtos);
  __ strd(v0, daddress(n));
}

void TemplateTable::astore(int n)
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  __ str(r0, iaddress(n));
}

void TemplateTable::pop()
{
  transition(vtos, vtos);
  __ add(esp, esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2()
{
  transition(vtos, vtos);
  __ add(esp, esp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup()
{
  transition(vtos, vtos);
  __ ldr(r0, Address(esp, 0));
  __ push(r0);
  // stack: ..., a, a
}

void TemplateTable::dup_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos());     // load b
  __ ldr(r2, at_tos_p1());  // load a
  __ str(r0, at_tos_p1());  // store b
  __ str(r2, at_tos());     // store a
  __ push(r0);              // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r0, at_tos());     // load c
  __ ldr(r2, at_tos_p2());  // load a
  __ str(r0, at_tos_p2());  // store c in a
  __ push(r0);              // push c
  // stack: ..., c, b, c, c
  __ ldr(r0, at_tos_p2());  // load b
  __ str(r2, at_tos_p2());  // store a in b
  // stack: ..., c, a, c, c
  __ str(r0, at_tos_p1());  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos_p1());  // load a
  __ push(r0);              // push a
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);              // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r2, at_tos());     // load c
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);              // push b
  __ push(r2);              // push c
  // stack: ..., a, b, c, b, c
  __ str(r2, at_tos_p3());  // store c in b
  // stack: ..., a, c, c, b, c
  __ ldr(r2, at_tos_p4());  // load a
  __ str(r2, at_tos_p2());  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ str(r0, at_tos_p4());  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ ldr(r2, at_tos());     // load d
  __ ldr(r0, at_tos_p1());  // load c
  __ push(r0);              // push c
  __ push(r2);              // push d
  // stack: ..., a, b, c, d, c, d
  __ ldr(r0, at_tos_p4());  // load b
  __ str(r0, at_tos_p2());  // store b in d
  __ str(r2, at_tos_p4());  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ ldr(r2, at_tos_p5());  // load a
  __ ldr(r0, at_tos_p3());  // load c
  __ str(r2, at_tos_p3());  // store a in c
  __ str(r0, at_tos_p5());  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r2, at_tos_p1());  // load a
  __ ldr(r0, at_tos());     // load b
  __ str(r2, at_tos());     // store a in b
  __ str(r0, at_tos_p1());  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op)
{
  transition(itos, itos);
  // r0 <== r1 op r0
  __ pop_i(r1);
  switch (op) {
  case add  : __ addw(r0, r1, r0); break;
  case sub  : __ subw(r0, r1, r0); break;
  case mul  : __ mulw(r0, r1, r0); break;
  case _and : __ andw(r0, r1, r0); break;
  case _or  : __ orrw(r0, r1, r0); break;
  case _xor : __ eorw(r0, r1, r0); break;
  case shl  : __ lslvw(r0, r1, r0); break;
  case shr  : __ asrvw(r0, r1, r0); break;
  case ushr : __ lsrvw(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op)
{
  transition(ltos, ltos);
  // r0 <== r1 op r0
  __ pop_l(r1);
  switch (op) {
  case add  : __ add(r0, r1, r0); break;
  case sub  : __ sub(r0, r1, r0); break;
  case mul  : __ mul(r0, r1, r0); break;
  case _and : __ andr(r0, r1, r0); break;
  case _or  : __ orr(r0, r1, r0); break;
  case _xor : __ eor(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 idiv r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::irem()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 irem r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lmul()
{
  transition(ltos, ltos);
  __ pop_l(r1);
  __ mul(r0, r0, r1);
}

void TemplateTable::ldiv()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 ldiv r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::lrem()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 lrem r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lshl()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lslv(r0, r1, r0);
}

void TemplateTable::lshr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ asrv(r0, r1, r0);
}

void TemplateTable::lushr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lsrv(r0, r1, r0);
}

void TemplateTable::fop2(Operation op)
{
  transition(ftos, ftos);
  switch (op) {
  case add:
    // n.b. the operand is popped from a 64 bit stack slot
    __ pop_f(v1);
    __ fadds(v0, v1, v0);
    break;
  case sub:
    __ pop_f(v1);
    __ fsubs(v0, v1, v0);
    break;
  case mul:
    __ pop_f(v1);
    __ fmuls(v0, v1, v0);
    break;
  case div:
    __ pop_f(v1);
    __ fdivs(v0, v1, v0);
    break;
  case rem:
    __ fmovs(v1, v0);
    __ pop_f(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op)
{
  transition(dtos, dtos);
  switch (op) {
  case add:
    // n.b. the operand is popped from a 64 bit stack slot
    __ pop_d(v1);
    __ faddd(v0, v1, v0);
    break;
  case sub:
    __ pop_d(v1);
    __ fsubd(v0, v1, v0);
    break;
  case mul:
    __ pop_d(v1);
    __ fmuld(v0, v1, v0);
    break;
  case div:
    __ pop_d(v1);
    __ fdivd(v0, v1, v0);
    break;
  case rem:
    __ fmovd(v1, v0);
    __ pop_d(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg()
{
  transition(itos, itos);
  __ negw(r0, r0);
}

void TemplateTable::lneg()
{
  transition(ltos, ltos);
  __ neg(r0, r0);
}

void TemplateTable::fneg()
{
  transition(ftos, ftos);
  __ fnegs(v0, v0);
}

void TemplateTable::dneg()
{
  transition(dtos, dtos);
  __ fnegd(v0, v0);
}

void TemplateTable::iinc()
{
  transition(vtos, vtos);
  __ load_signed_byte(r1, at_bcp(2)); // get constant
  locals_index(r2);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::wide_iinc()
{
  transition(vtos, vtos);
  // __ mov(r1, zr);
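  // wide iinc has a 2-byte index at bcp + 2 and a 2-byte signed increment
  // at bcp + 4, both big-endian: load all four bytes at once, byte-swap
  // each halfword, then extract the index (negated for locals addressing)
  // and the sign-extended increment.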
  __ ldrw(r1, at_bcp(2)); // get constant and index
  __ rev16(r1, r1);
  __ ubfx(r2, r1, 0, 16);
  __ neg(r2, r2);
  __ sbfx(r1, r1, 16, 16);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::convert()
{
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT
  // static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ sxtw(r0, r0);
    break;
  case Bytecodes::_i2f:
    __ scvtfws(v0, r0);
    break;
  case Bytecodes::_i2d:
    __ scvtfwd(v0, r0);
    break;
  case Bytecodes::_i2b:
    __ sxtbw(r0, r0);
    break;
  case Bytecodes::_i2c:
    __ uxthw(r0, r0);
    break;
  case Bytecodes::_i2s:
    __ sxthw(r0, r0);
    break;
  case Bytecodes::_l2i:
    __ uxtw(r0, r0);
    break;
  case Bytecodes::_l2f:
    __ scvtfs(v0, r0);
    break;
  case Bytecodes::_l2d:
    __ scvtfd(v0, r0);
    break;
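  // In the four FP -> integral cases below, fcvtz* sets FPSR exception
  // bits for NaN or out-of-range inputs; when that happens the matching
  // SharedRuntime routine is called to produce the Java-specified result
  // (0 for NaN, saturation at the type bounds).  The operand is still in
  // v0 (== c_farg0) for the runtime call.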
  case Bytecodes::_f2i:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzsw(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzs(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_f2d:
    __ fcvts(v0, v0);
    break;
  case Bytecodes::_d2i:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzdw(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzd(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_d2f:
    __ fcvtd(v0, v0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp()
{
  transition(ltos, itos);
  Label done;
  __ pop_l(r1);
  __ cmp(r1, r0);
  __ mov(r0, (uint64_t)-1L);
  __ br(Assembler::LT, done);
  // __ mov(r0, 1UL);
  // __ csel(r0, r0, zr, Assembler::NE);
  // and here is a faster way
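  // csinc r0, zr, zr, EQ: r0 = (EQ ? zr : zr + 1), i.e. 0 if the operands
  // were equal and 1 otherwise (we only reach here when r1 >= r0).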
  __ csinc(r0, zr, zr, Assembler::EQ);
  __ bind(done);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result)
{
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(v1);
    __ fcmps(v1, v0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(v1);
    __ fcmpd(v1, v0);
  }
  if (unordered_result < 0) {
    // we want -1 for unordered or less than, 0 for equal and 1 for
    // greater than.
    __ mov(r0, (uint64_t)-1L);
    // for FP LT tests less than or unordered
    __ br(Assembler::LT, done);
    // install 0 for EQ otherwise 1
    __ csinc(r0, zr, zr, Assembler::EQ);
  } else {
    // we want -1 for less than, 0 for equal and 1 for unordered or
    // greater than.
    __ mov(r0, 1L);
    // for FP HI tests greater than or unordered
    __ br(Assembler::HI, done);
    // install 0 for EQ otherwise ~0
    __ csinv(r0, zr, zr, Assembler::EQ);
  }
  __ bind(done);
}

void TemplateTable::branch(bool is_jsr, bool is_wide)
{
  __ profile_taken_branch(r0, r1);
  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // load branch displacement
  if (!is_wide) {
    __ ldrh(r2, at_bcp(1));
    __ rev16(r2, r2);
    // sign extend the 16 bit value in r2
    __ sbfm(r2, r2, 0, 15);
  } else {
    __ ldrw(r2, at_bcp(1));
    __ revw(r2, r2);
    // sign extend the 32 bit value in r2
    __ sbfm(r2, r2, 0, 31);
  }

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.

  if (is_jsr) {
    // Pre-load the next target bytecode into rscratch1
    __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
    // compute return address as bci (the bci of the bytecode following
    // the jsr; its length, 3 or 5 bytes, is folded into rscratch2 below)
    __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
    __ add(rscratch2, rscratch2,
           in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
    __ sub(r1, rbcp, rscratch2);
    __ push_i(r1);
    // Adjust the bcp by the displacement in r2 (sign-extended above)
1794     __ add(rbcp, rbcp, r2);
1795     __ dispatch_only(vtos, /*generate_poll*/true);
1796     return;
1797   }
1798 
1799   // Normal (non-jsr) branch handling
1800 
1801   // Adjust the bcp by the displacement in r2
1802   __ add(rbcp, rbcp, r2);
1803 
1804   assert(UseLoopCounter || !UseOnStackReplacement,
1805          "on-stack-replacement requires loop counters");
1806   Label backedge_counter_overflow;
1807   Label dispatch;
1808   if (UseLoopCounter) {
1809     // increment backedge counter for backward branches
1810     // r0: MDO
1811     // w1: MDO bumped taken-count
1812     // r2: target offset
1813     __ cmp(r2, zr);
1814     __ br(Assembler::GT, dispatch); // count only if backward branch
1815 
1816     // ECN: FIXME: This code smells
1817     // check if MethodCounters exists
1818     Label has_counters;
1819     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1820     __ cbnz(rscratch1, has_counters);
1821     __ push(r0);
1822     __ push(r1);
1823     __ push(r2);
1824     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
1825             InterpreterRuntime::build_method_counters), rmethod);
1826     __ pop(r2);
1827     __ pop(r1);
1828     __ pop(r0);
1829     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1830     __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
1831     __ bind(has_counters);
1832 
1833     Label no_mdo;
1834     int increment = InvocationCounter::count_increment;
1835     if (ProfileInterpreter) {
1836       // Are we profiling?
1837       __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1838       __ cbz(r1, no_mdo);
1839       // Increment the MDO backedge counter
1840       const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
1841                                          in_bytes(InvocationCounter::counter_offset()));
1842       const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
1843       __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1844                                  r0, rscratch1, false, Assembler::EQ,
1845                                  UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1846       __ b(dispatch);
1847     }
1848     __ bind(no_mdo);
1849     // Increment backedge counter in MethodCounters*
1850     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1851     const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
1852     __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
1853                                r0, rscratch2, false, Assembler::EQ,
1854                                UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1855     __ bind(dispatch);
1856   }
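
       // Shape of the counting done above (a sketch of what
       // increment_mask_and_jump is assumed to emit; the mask holds the
       // notification-frequency bits):
       //
       //   counter += count_increment;
       //   if ((counter & mask) == 0)   // tested with Assembler::EQ
       //     goto backedge_counter_overflow;  // or dispatch when OSR is off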
1857 
1858   // Pre-load the next target bytecode into rscratch1
1859   __ load_unsigned_byte(rscratch1, Address(rbcp, 0));
1860 
1861   // continue with the bytecode @ target
1862   // rscratch1: target bytecode
1863   // rbcp: target bcp
1864   __ dispatch_only(vtos, /*generate_poll*/true);
1865 
1866   if (UseLoopCounter && UseOnStackReplacement) {
1867     // backedge counter overflow
1868     __ bind(backedge_counter_overflow);
1869     __ neg(r2, r2);
1870     __ add(r2, r2, rbcp);     // branch bcp
1871     // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1872     __ call_VM(noreg,
1873                CAST_FROM_FN_PTR(address,
1874                                 InterpreterRuntime::frequency_counter_overflow),
1875                r2);
1876     __ load_unsigned_byte(r1, Address(rbcp, 0));  // restore target bytecode
1877 
1878     // r0: osr nmethod (osr ok) or null (osr not possible)
1879     // w1: target bytecode
1880     // r2: scratch
1881     __ cbz(r0, dispatch);     // test result -- no osr if null
1882     // nmethod may have been invalidated (VM may block upon call_VM return)
1883     __ ldrb(r2, Address(r0, nmethod::state_offset()));
1884     if (nmethod::in_use != 0)
1885       __ sub(r2, r2, nmethod::in_use);
1886     __ cbnz(r2, dispatch);
1887 
1888     // We have the address of an on stack replacement routine in r0.
1889     // We need to prepare to execute the OSR method. First we must
1890     // migrate the locals and monitors off the stack.
1891 
1892     __ mov(r19, r0);                             // save the nmethod
1893 
1894     call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1895 
1896     // r0 is OSR buffer, move it to expected parameter location
1897     __ mov(j_rarg0, r0);
1898 
1899     // remove activation
1900     // get sender esp
1901     __ ldr(esp,
1902         Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
1903     // remove frame anchor
1904     __ leave();
1905     // Ensure compiled code always sees stack at proper alignment
1906     __ andr(sp, esp, -16);
1907 
1908     // and begin the OSR nmethod
1909     __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
1910     __ br(rscratch1);
1911   }
1912 }
1913 
1914 
1915 void TemplateTable::if_0cmp(Condition cc)
1916 {
1917   transition(itos, vtos);
1918   // assume branch is more often taken than not (loops use backward branches)
1919   Label not_taken;
1920   if (cc == equal)
1921     __ cbnzw(r0, not_taken);
1922   else if (cc == not_equal)
1923     __ cbzw(r0, not_taken);
1924   else {
1925     __ andsw(zr, r0, r0);
1926     __ br(j_not(cc), not_taken);
1927   }
1928 
1929   branch(false, false);
1930   __ bind(not_taken);
1931   __ profile_not_taken_branch(r0);
1932 }
1933 
1934 void TemplateTable::if_icmp(Condition cc)
1935 {
1936   transition(itos, vtos);
1937   // assume branch is more often taken than not (loops use backward branches)
1938   Label not_taken;
1939   __ pop_i(r1);
1940   __ cmpw(r1, r0, Assembler::LSL);
1941   __ br(j_not(cc), not_taken);
1942   branch(false, false);
1943   __ bind(not_taken);
1944   __ profile_not_taken_branch(r0);
1945 }
1946 
1947 void TemplateTable::if_nullcmp(Condition cc)
1948 {
1949   transition(atos, vtos);
1950   // assume branch is more often taken than not (loops use backward branches)
1951   Label not_taken;
1952   if (cc == equal)
1953     __ cbnz(r0, not_taken);
1954   else
1955     __ cbz(r0, not_taken);
1956   branch(false, false);
1957   __ bind(not_taken);
1958   __ profile_not_taken_branch(r0);
1959 }
1960 
1961 void TemplateTable::if_acmp(Condition cc)
1962 {
1963   transition(atos, vtos);
1964   // assume branch is more often taken than not (loops use backward branches)
1965   Label not_taken;
1966   __ pop_ptr(r1);
1967   __ cmpoop(r1, r0);
1968   __ br(j_not(cc), not_taken);
1969   branch(false, false);
1970   __ bind(not_taken);
1971   __ profile_not_taken_branch(r0);
1972 }
1973 
1974 void TemplateTable::ret() {
1975   transition(vtos, vtos);
1976   locals_index(r1);
1977   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
1978   __ profile_ret(r1, r2);
1979   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
1980   __ lea(rbcp, Address(rbcp, r1));
1981   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
1982   __ dispatch_next(vtos, 0, /*generate_poll*/true);
1983 }
1984 
1985 void TemplateTable::wide_ret() {
1986   transition(vtos, vtos);
1987   locals_index_wide(r1);
1988   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
1989   __ profile_ret(r1, r2);
1990   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
1991   __ lea(rbcp, Address(rbcp, r1));
1992   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
1993   __ dispatch_next(vtos, 0, /*generate_poll*/true);
1994 }
1995 
1996 
1997 void TemplateTable::tableswitch() {
1998   Label default_case, continue_execution;
1999   transition(itos, vtos);
2000   // align rbcp
2001   __ lea(r1, at_bcp(BytesPerInt));
2002   __ andr(r1, r1, -BytesPerInt);
2003   // load lo & hi
2004   __ ldrw(r2, Address(r1, BytesPerInt));
2005   __ ldrw(r3, Address(r1, 2 * BytesPerInt));
2006   __ rev32(r2, r2);
2007   __ rev32(r3, r3);
2008   // check against lo & hi
2009   __ cmpw(r0, r2);
2010   __ br(Assembler::LT, default_case);
2011   __ cmpw(r0, r3);
2012   __ br(Assembler::GT, default_case);
2013   // lookup dispatch offset
2014   __ subw(r0, r0, r2);
2015   __ lea(r3, Address(r1, r0, Address::uxtw(2)));
2016   __ ldrw(r3, Address(r3, 3 * BytesPerInt));
2017   __ profile_switch_case(r0, r1, r2);
2018   // continue execution
2019   __ bind(continue_execution);
2020   __ rev32(r3, r3);
2021   __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
2022   __ add(rbcp, rbcp, r3, ext::sxtw);
2023   __ dispatch_only(vtos, /*generate_poll*/true);
2024   // handle default
2025   __ bind(default_case);
2026   __ profile_switch_default(r0);
2027   __ ldrw(r3, Address(r1, 0));
2028   __ b(continue_execution);
2029 }
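
     // Operand layout consumed above (per the JVM spec; all values are
     // big-endian and 4-byte aligned after the opcode, hence the rev32s and
     // the alignment of rbcp):
     //
     //   tableswitch: s4 default_offset; s4 lo; s4 hi;
     //                s4 jump_offsets[hi - lo + 1];
     //
     // target_bcp = bcp + jump_offsets[key - lo], or bcp + default_offset
     // when key is outside [lo, hi].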
2030 
2031 void TemplateTable::lookupswitch() {
2032   transition(itos, itos);
2033   __ stop("lookupswitch bytecode should have been rewritten");
2034 }
2035 
2036 void TemplateTable::fast_linearswitch() {
2037   transition(itos, vtos);
2038   Label loop_entry, loop, found, continue_execution;
2039   // bswap r0 so we can avoid bswapping the table entries
2040   __ rev32(r0, r0);
2041   // align rbcp
2042   __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2043                                     // this instruction (change offsets
2044                                     // below)
2045   __ andr(r19, r19, -BytesPerInt);
2046   // set counter
2047   __ ldrw(r1, Address(r19, BytesPerInt));
2048   __ rev32(r1, r1);
2049   __ b(loop_entry);
2050   // table search
2051   __ bind(loop);
2052   __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2053   __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
2054   __ cmpw(r0, rscratch1);
2055   __ br(Assembler::EQ, found);
2056   __ bind(loop_entry);
2057   __ subs(r1, r1, 1);
2058   __ br(Assembler::PL, loop);
2059   // default case
2060   __ profile_switch_default(r0);
2061   __ ldrw(r3, Address(r19, 0));
2062   __ b(continue_execution);
2063   // entry found -> get offset
2064   __ bind(found);
2065   __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2066   __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
2067   __ profile_switch_case(r1, r0, r19);
2068   // continue execution
2069   __ bind(continue_execution);
2070   __ rev32(r3, r3);
2071   __ add(rbcp, rbcp, r3, ext::sxtw);
2072   __ ldrb(rscratch1, Address(rbcp, 0));
2073   __ dispatch_only(vtos, /*generate_poll*/true);
2074 }
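
     // Operand layout consumed by fast_linearswitch above and
     // fast_binaryswitch below (per the JVM spec; big-endian, 4-byte aligned
     // after the opcode):
     //
     //   lookupswitch: s4 default_offset; s4 npairs;
     //                 struct { s4 match; s4 offset; } pairs[npairs];
     //
     // Each pair is 8 bytes, which is why both searches scale their index
     // with Address::lsl(3).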
2075 
2076 void TemplateTable::fast_binaryswitch() {
2077   transition(itos, vtos);
2078   // Implementation using the following core algorithm:
2079   //
2080   // int binary_search(int key, LookupswitchPair* array, int n) {
2081   //   // Binary search according to "Methodik des Programmierens" by
2082   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2083   //   int i = 0;
2084   //   int j = n;
2085   //   while (i+1 < j) {
2086   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2087   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2088   //     // where a stands for the array and assuming that the (nonexistent)
2089   //     // element a[n] is infinitely big.
2090   //     int h = (i + j) >> 1;
2091   //     // i < h < j
2092   //     if (key < array[h].fast_match()) {
2093   //       j = h;
2094   //     } else {
2095   //       i = h;
2096   //     }
2097   //   }
2098   //   // R: a[i] <= key < a[i+1] or Q
2099   //   // (i.e., if key is within array, i is the correct index)
2100   //   return i;
2101   // }
2102 
2103   // Register allocation
2104   const Register key   = r0; // already set (tosca)
2105   const Register array = r1;
2106   const Register i     = r2;
2107   const Register j     = r3;
2108   const Register h     = rscratch1;
2109   const Register temp  = rscratch2;
2110 
2111   // Find array start
2112   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2113                                           // get rid of this
2114                                           // instruction (change
2115                                           // offsets below)
2116   __ andr(array, array, -BytesPerInt);
2117 
2118   // Initialize i & j
2119   __ mov(i, 0);                            // i = 0;
2120   __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);
2121 
2122   // Convert j into native byte ordering
2123   __ rev32(j, j);
2124 
2125   // And start
2126   Label entry;
2127   __ b(entry);
2128 
2129   // binary search loop
2130   {
2131     Label loop;
2132     __ bind(loop);
2133     // int h = (i + j) >> 1;
2134     __ addw(h, i, j);              // h = i + j;
2135     __ lsrw(h, h, 1);              // h = (i + j) >> 1;
2136     // if (key < array[h].fast_match()) {
2137     //   j = h;
2138     // } else {
2139     //   i = h;
2140     // }
2141     // Convert array[h].match to native byte-ordering before compare
2142     __ ldr(temp, Address(array, h, Address::lsl(3)));
2143     __ rev32(temp, temp);
2144     __ cmpw(key, temp);
2145     // j = h if (key <  array[h].fast_match())
2146     __ csel(j, h, j, Assembler::LT);
2147     // i = h if (key >= array[h].fast_match())
2148     __ csel(i, h, i, Assembler::GE);
2149     // while (i+1 < j)
2150     __ bind(entry);
2151     __ addw(h, i, 1);          // i+1
2152     __ cmpw(h, j);             // i+1 < j
2153     __ br(Assembler::LT, loop);
2154   }
2155 
2156   // end of binary search, result index is i (must check again!)
2157   Label default_case;
2158   // Convert array[i].match to native byte-ordering before compare
2159   __ ldr(temp, Address(array, i, Address::lsl(3)));
2160   __ rev32(temp, temp);
2161   __ cmpw(key, temp);
2162   __ br(Assembler::NE, default_case);
2163 
2164   // entry found -> j = offset
2165   __ add(j, array, i, ext::uxtx, 3);
2166   __ ldrw(j, Address(j, BytesPerInt));
2167   __ profile_switch_case(i, key, array);
2168   __ rev32(j, j);
2169   __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2170   __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2171   __ dispatch_only(vtos, /*generate_poll*/true);
2172 
2173   // default case -> j = default offset
2174   __ bind(default_case);
2175   __ profile_switch_default(i);
2176   __ ldrw(j, Address(array, -2 * BytesPerInt));
2177   __ rev32(j, j);
2178   __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2179   __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2180   __ dispatch_only(vtos, /*generate_poll*/true);
2181 }
2182 
2183 
2184 void TemplateTable::_return(TosState state)
2185 {
2186   transition(state, state);
2187   assert(_desc->calls_vm(),
2188          "inconsistent calls_vm information"); // call in remove_activation
2189 
2190   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2191     assert(state == vtos, "only valid state");
2192 
2193     __ ldr(c_rarg1, aaddress(0));
2194     __ load_klass(r3, c_rarg1);
2195     __ ldrb(r3, Address(r3, Klass::misc_flags_offset()));
2196     Label skip_register_finalizer;
2197     __ tbz(r3, exact_log2(KlassFlags::_misc_has_finalizer), skip_register_finalizer);
2198 
2199     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2200 
2201     __ bind(skip_register_finalizer);
2202   }
2203 
2204   // Issue a StoreStore barrier after all stores but before return
2205   // from any constructor for any class with a final field.  We don't
2206   // know whether this is such a constructor, so we always issue it.
2207   if (_desc->bytecode() == Bytecodes::_return)
2208     __ membar(MacroAssembler::StoreStore);
2209 
2210   if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
2211     Label no_safepoint;
2212     __ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
2213     __ tbz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), no_safepoint);
2214     __ push(state);
2215     __ push_cont_fastpath(rthread);
2216     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
2217     __ pop_cont_fastpath(rthread);
2218     __ pop(state);
2219     __ bind(no_safepoint);
2220   }
2221 
2222   // Narrow result if state is itos but result type is smaller.
2223   // Need to narrow in the return bytecode rather than in generate_return_entry
2224   // since compiled code callers expect the result to already be narrowed.
2225   if (state == itos) {
2226     __ narrow(r0);
2227   }
2228 
2229   __ remove_activation(state);
2230   __ ret(lr);
2231 }
2232 
2233 // ----------------------------------------------------------------------------
2234 // Volatile variables demand their effects be made known to all CPUs
2235 // in order.  Store buffers on most chips allow reads & writes to
2236 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2237 // without some kind of memory barrier (i.e., it's not sufficient that
2238 // the interpreter does not reorder volatile references, the hardware
2239 // also must not reorder them).
2240 //
2241 // According to the new Java Memory Model (JMM):
2242 // (1) All volatiles are serialized with respect to each other.  ALSO reads &
2243 //     writes act as acquire & release, so:
2244 // (2) A read cannot let unrelated NON-volatile memory refs that
2245 //     happen after the read float up to before the read.  It's OK for
2246 //     non-volatile memory refs that happen before the volatile read to
2247 //     float down below it.
2248 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2249 //     memory refs that happen BEFORE the write float down to after the
2250 //     write.  It's OK for non-volatile memory refs that happen after the
2251 //     volatile write to float up before it.
2252 //
2253 // We only put in barriers around volatile refs (they are expensive),
2254 // not _between_ memory refs (that would require us to track the
2255 // flavor of the previous memory refs).  Requirements (2) and (3)
2256 // require some barriers before volatile stores and after volatile
2257 // loads.  These nearly cover requirement (1) but miss the
2258 // volatile-store-volatile-load case.  This final case is placed after
2259 // volatile-stores although it could just as well go before
2260 // volatile-loads.
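     //
     // Concretely, this file pairs volatile field accesses with the following
     // barriers (a summary of the code below, not a new scheme):
     //
     //   volatile load:   [leading membar AnyAny when compiled code may use
     //                     ldar/stlr];  ldr;  membar LoadLoad|LoadStore
     //   volatile store:  membar StoreStore|LoadStore;  str;
     //                    membar StoreLoad|StoreStore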
2261 
2262 void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
2263                                             Register Rcache,
2264                                             Register index) {
2265   const Register temp = r19;
2266   assert_different_registers(Rcache, index, temp);
2267   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2268 
2269   Label resolved, clinit_barrier_slow;
2270 
2271   Bytecodes::Code code = bytecode();
2272   __ load_method_entry(Rcache, index);
2273   switch(byte_no) {
2274     case f1_byte:
2275       __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
2276       break;
2277     case f2_byte:
2278       __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
2279       break;
2280   }
2281   // Load-acquire the bytecode to match store-release in InterpreterRuntime
2282   __ ldarb(temp, temp);
2283   __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
2284   __ br(Assembler::EQ, resolved);
2285 
2286   // resolve first time through
2287   // Class initialization barrier slow path lands here as well.
2288   __ bind(clinit_barrier_slow);
2289   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2290   __ mov(temp, (int) code);
2291   __ call_VM(noreg, entry, temp);
2292 
2293   // Update registers with resolved info
2294   __ load_method_entry(Rcache, index);
2295   // n.b. unlike x86, Rcache is now rcpool plus the indexed offset,
2296   // so all clients of this method must be modified accordingly
2297   __ bind(resolved);
2298 
2299   // Class initialization barrier for static methods
2300   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2301     __ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
2302     __ load_method_holder(temp, temp);
2303     __ clinit_barrier(temp, rscratch1, nullptr, &clinit_barrier_slow);
2304   }
2305 }
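
     // Control flow of the check above (a sketch; resolved_bytecode() stands
     // in for the bytecode1/bytecode2 slot selected by byte_no):
     //
     //   if (load_acquire(entry->resolved_bytecode()) != code) {
     //     InterpreterRuntime::resolve_from_cache(code);  // may block / GC
     //     reload entry;                                  // registers refreshed
     //   }
     //   // plus a re-entry from the clinit barrier for invokestatic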
2306 
2307 void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
2308                                             Register Rcache,
2309                                             Register index) {
2310   const Register temp = r19;
2311   assert_different_registers(Rcache, index, temp);
2312 
2313   Label resolved;
2314 
2315   Bytecodes::Code code = bytecode();
2316   switch (code) {
2317   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2318   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2319   default: break;
2320   }
2321 
2322   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2323   __ load_field_entry(Rcache, index);
2324   if (byte_no == f1_byte) {
2325     __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::get_code_offset())));
2326   } else {
2327     __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::put_code_offset())));
2328   }
2329   // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
2330   __ ldarb(temp, temp);
2331   __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
2332   __ br(Assembler::EQ, resolved);
2333 
2334   // resolve first time through
2335   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2336   __ mov(temp, (int) code);
2337   __ call_VM(noreg, entry, temp);
2338 
2339   // Update registers with resolved info
2340   __ load_field_entry(Rcache, index);
2341   __ bind(resolved);
2342 }
2343 
2344 void TemplateTable::load_resolved_field_entry(Register obj,
2345                                               Register cache,
2346                                               Register tos_state,
2347                                               Register offset,
2348                                               Register flags,
2349                                               bool is_static = false) {
2350   assert_different_registers(cache, tos_state, flags, offset);
2351 
2352   // Field offset
2353   __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
2354 
2355   // Flags
2356   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())));
2357 
2358   // TOS state
2359   if (tos_state != noreg) {
2360     __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())));
2361   }
2362 
2363   // Klass overwrite register
2364   if (is_static) {
2365     __ ldr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()));
2366     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2367     __ ldr(obj, Address(obj, mirror_offset));
2368     __ resolve_oop_handle(obj, r5, rscratch2);
2369   }
2370 }
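
     // Outputs of the helper above (a sketch, mirroring the loads it emits):
     //
     //   offset    = entry->field_offset()   (s4)
     //   flags     = entry->flags()          (u1; volatility bit etc.)
     //   tos_state = entry->type()           (u1; btos..dtos/atos)
     //   obj       = holder klass's java mirror, via the OopHandle
     //               (static fields only)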
2371 
2372 void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
2373                                                                  Register method,
2374                                                                  Register flags) {
2375 
2376   // setup registers
2377   const Register index = flags;
2378   assert_different_registers(method, cache, flags);
2379 
2380   // determine constant pool cache field offsets
2381   resolve_cache_and_index_for_method(f1_byte, cache, index);
2382   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2383   __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2384 }
2385 
2386 void TemplateTable::load_resolved_method_entry_handle(Register cache,
2387                                                       Register method,
2388                                                       Register ref_index,
2389                                                       Register flags) {
2390   // setup registers
2391   const Register index = ref_index;
2392   assert_different_registers(method, flags);
2393   assert_different_registers(method, cache, index);
2394 
2395   // determine constant pool cache field offsets
2396   resolve_cache_and_index_for_method(f1_byte, cache, index);
2397   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2398 
2399   // maybe push appendix to arguments (just before return address)
2400   Label L_no_push;
2401   __ tbz(flags, ResolvedMethodEntry::has_appendix_shift, L_no_push);
2402   // invokehandle uses an index into the resolved references array
2403   __ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
2404   // Push the appendix as a trailing parameter.
2405   // This must be done before we get the receiver,
2406   // since the parameter_size includes it.
2407   Register appendix = method;
2408   __ load_resolved_reference_at_index(appendix, ref_index);
2409   __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
2410   __ bind(L_no_push);
2411 
2412   __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2413 }
2414 
2415 void TemplateTable::load_resolved_method_entry_interface(Register cache,
2416                                                          Register klass,
2417                                                          Register method_or_table_index,
2418                                                          Register flags) {
2419   // setup registers
2420   const Register index = method_or_table_index;
2421   assert_different_registers(method_or_table_index, cache, flags);
2422 
2423   // determine constant pool cache field offsets
2424   resolve_cache_and_index_for_method(f1_byte, cache, index);
2425   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2426 
2427   // Invokeinterface can behave in different ways:
2428   // If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
2429   // behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
2430   // vtable index is placed in the register.
2431   // Otherwise, the registers will be populated with the klass and method.
2432 
2433   Label NotVirtual; Label NotVFinal; Label Done;
2434   __ tbz(flags, ResolvedMethodEntry::is_forced_virtual_shift, NotVirtual);
2435   __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
2436   __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2437   __ b(Done);
2438 
2439   __ bind(NotVFinal);
2440   __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2441   __ b(Done);
2442 
2443   __ bind(NotVirtual);
2444   __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2445   __ ldr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
2446   __ bind(Done);
2447 }
2448 
2449 void TemplateTable::load_resolved_method_entry_virtual(Register cache,
2450                                                        Register method_or_table_index,
2451                                                        Register flags) {
2452   // setup registers
2453   const Register index = flags;
2454   assert_different_registers(method_or_table_index, cache, flags);
2455 
2456   // determine constant pool cache field offsets
2457   resolve_cache_and_index_for_method(f2_byte, cache, index);
2458   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2459 
2460   // method_or_table_index can either be a vtable index or a Method* depending on the virtual final flag
2461   Label NotVFinal; Label Done;
2462   __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
2463   __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2464   __ b(Done);
2465 
2466   __ bind(NotVFinal);
2467   __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2468   __ bind(Done);
2469 }
2470 
2471 // The rmethod register is input and overwritten to be the adapter method for the
2472 // indy call. The link register (lr) is set to the return address for the adapter and
2473 // an appendix may be pushed to the stack. Registers r0-r3 are clobbered.
2474 void TemplateTable::load_invokedynamic_entry(Register method) {
2475   // setup registers
2476   const Register appendix = r0;
2477   const Register cache = r2;
2478   const Register index = r3;
2479   assert_different_registers(method, appendix, cache, index, rcpool);
2480 
2481   __ save_bcp();
2482 
2483   Label resolved;
2484 
2485   __ load_resolved_indy_entry(cache, index);
2486   // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
2487   __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2488   __ ldar(method, method);
2489 
2490   // A non-null method means the entry is already resolved
2491   __ cbnz(method, resolved);
2492 
2493   Bytecodes::Code code = bytecode();
2494 
2495   // Call to the interpreter runtime to resolve invokedynamic
2496   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2497   __ mov(method, code); // this is essentially Bytecodes::_invokedynamic
2498   __ call_VM(noreg, entry, method);
2499   // Update registers with resolved info
2500   __ load_resolved_indy_entry(cache, index);
2501   // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
2502   __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2503   __ ldar(method, method);
2504 
2505 #ifdef ASSERT
2506   __ cbnz(method, resolved);
2507   __ stop("Should be resolved by now");
2508 #endif // ASSERT
2509   __ bind(resolved);
2510 
2511   Label L_no_push;
2512   // Check if there is an appendix
2513   __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
2514   __ tbz(index, ResolvedIndyEntry::has_appendix_shift, L_no_push);
2515 
2516   // Get appendix
2517   __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
2518   // Push the appendix as a trailing parameter
2519   // since the parameter_size includes it.
2520   __ push(method);
2521   __ mov(method, index);
2522   __ load_resolved_reference_at_index(appendix, method);
2523   __ verify_oop(appendix);
2524   __ pop(method);
2525   __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
2526   __ bind(L_no_push);
2527 
2528   // compute return type
2529   __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
2530   // load return address
2531   // Return address is loaded into the link register (lr) rather than pushed
2532   // to the stack as on x86
2533   {
2534     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2535     __ mov(rscratch1, table_addr);
2536     __ ldr(lr, Address(rscratch1, index, Address::lsl(3)));
2537   }
2538 }
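
     // State on exit (a sketch): the expression stack holds the pushed
     // arguments plus, when has_appendix is set, the appendix oop
     // (MethodType, CallSite, ...) as a trailing parameter:
     //
     //   esp -> [ appendix? ]
     //          [ arg_n-1   ]
     //          [ ...       ]
     //
     // method = invoker adapter Method*, lr = invoke return entry selected by
     // the call site's result type.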
2539 
2540 // The registers cache and index are expected to be set before the call.
2541 // Correct values of the cache and index registers are preserved.
2542 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2543                                             bool is_static, bool has_tos) {
2544   // do the JVMTI work here to avoid disturbing the register state below
2545   // We use c_rarg registers here because we want to use the registers that
2546   // will be used in the call to the VM
2547   if (JvmtiExport::can_post_field_access()) {
2548     // Check to see if a field access watch has been set before we
2549     // take the time to call into the VM.
2550     Label L1;
2551     assert_different_registers(cache, index, r0);
2552     __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2553     __ ldrw(r0, Address(rscratch1));
2554     __ cbzw(r0, L1);
2555 
2556     __ load_field_entry(c_rarg2, index);
2557 
2558     if (is_static) {
2559       __ mov(c_rarg1, zr); // null object reference
2560     } else {
2561       __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
2562       __ verify_oop(c_rarg1);
2563     }
2564     // c_rarg1: object pointer or null
2565     // c_rarg2: cache entry pointer
2566     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2567                                        InterpreterRuntime::post_field_access),
2568                c_rarg1, c_rarg2);
2569     __ load_field_entry(cache, index);
2570     __ bind(L1);
2571   }
2572 }
2573 
2574 void TemplateTable::pop_and_check_object(Register r)
2575 {
2576   __ pop_ptr(r);
2577   __ null_check(r);  // for field access must check obj.
2578   __ verify_oop(r);
2579 }
2580 
2581 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2582 {
2583   const Register cache     = r4;
2584   const Register obj       = r4;
2585   const Register index     = r3;
2586   const Register tos_state = r3;
2587   const Register off       = r19;
2588   const Register flags     = r6;
2589   const Register bc        = r4; // uses same reg as obj, so don't mix them
2590 
2591   resolve_cache_and_index_for_field(byte_no, cache, index);
2592   jvmti_post_field_access(cache, index, is_static, false);
2593   load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
2594 
2595   if (!is_static) {
2596     // obj is on the stack
2597     pop_and_check_object(obj);
2598   }
2599 
2600   // 8179954: We need to make sure that the code generated for
2601   // volatile accesses forms a sequentially-consistent set of
2602   // operations when combined with STLR and LDAR.  Without a leading
2603   // membar it's possible for a simple Dekker test to fail if loads
2604   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
2605   // the stores in one method and we interpret the loads in another.
2606   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
2607     Label notVolatile;
2608     __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2609     __ membar(MacroAssembler::AnyAny);
2610     __ bind(notVolatile);
2611   }
2612 
2613   const Address field(obj, off);
2614 
2615   Label Done, notByte, notBool, notInt, notShort, notChar,
2616               notLong, notFloat, notObj, notDouble;
2617 
2618   assert(btos == 0, "change code, btos != 0");
2619   __ cbnz(tos_state, notByte);
2620 
2621   // Don't rewrite getstatic, only getfield
2622   if (is_static) rc = may_not_rewrite;
2623 
2624   // btos
2625   __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
2626   __ push(btos);
2627   // Rewrite bytecode to be faster
2628   if (rc == may_rewrite) {
2629     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2630   }
2631   __ b(Done);
2632 
2633   __ bind(notByte);
2634   __ cmp(tos_state, (u1)ztos);
2635   __ br(Assembler::NE, notBool);
2636 
2637   // ztos (same code as btos)
2638   __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
2639   __ push(ztos);
2640   // Rewrite bytecode to be faster
2641   if (rc == may_rewrite) {
2642     // use btos rewriting; no truncation to the t/f bit is needed for getfield
2643     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2644   }
2645   __ b(Done);
2646 
2647   __ bind(notBool);
2648   __ cmp(tos_state, (u1)atos);
2649   __ br(Assembler::NE, notObj);
2650   // atos
2651   do_oop_load(_masm, field, r0, IN_HEAP);
2652   __ push(atos);
2653   if (rc == may_rewrite) {
2654     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2655   }
2656   __ b(Done);
2657 
2658   __ bind(notObj);
2659   __ cmp(tos_state, (u1)itos);
2660   __ br(Assembler::NE, notInt);
2661   // itos
2662   __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
2663   __ push(itos);
2664   // Rewrite bytecode to be faster
2665   if (rc == may_rewrite) {
2666     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2667   }
2668   __ b(Done);
2669 
2670   __ bind(notInt);
2671   __ cmp(tos_state, (u1)ctos);
2672   __ br(Assembler::NE, notChar);
2673   // ctos
2674   __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
2675   __ push(ctos);
2676   // Rewrite bytecode to be faster
2677   if (rc == may_rewrite) {
2678     patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
2679   }
2680   __ b(Done);
2681 
2682   __ bind(notChar);
2683   __ cmp(tos_state, (u1)stos);
2684   __ br(Assembler::NE, notShort);
2685   // stos
2686   __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
2687   __ push(stos);
2688   // Rewrite bytecode to be faster
2689   if (rc == may_rewrite) {
2690     patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
2691   }
2692   __ b(Done);
2693 
2694   __ bind(notShort);
2695   __ cmp(tos_state, (u1)ltos);
2696   __ br(Assembler::NE, notLong);
2697   // ltos
2698   __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
2699   __ push(ltos);
2700   // Rewrite bytecode to be faster
2701   if (rc == may_rewrite) {
2702     patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
2703   }
2704   __ b(Done);
2705 
2706   __ bind(notLong);
2707   __ cmp(tos_state, (u1)ftos);
2708   __ br(Assembler::NE, notFloat);
2709   // ftos
2710   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2711   __ push(ftos);
2712   // Rewrite bytecode to be faster
2713   if (rc == may_rewrite) {
2714     patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
2715   }
2716   __ b(Done);
2717 
2718   __ bind(notFloat);
2719 #ifdef ASSERT
2720   __ cmp(tos_state, (u1)dtos);
2721   __ br(Assembler::NE, notDouble);
2722 #endif
2723   // dtos
2724   __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
2725   __ push(dtos);
2726   // Rewrite bytecode to be faster
2727   if (rc == may_rewrite) {
2728     patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
2729   }
2730 #ifdef ASSERT
2731   __ b(Done);
2732 
2733   __ bind(notDouble);
2734   __ stop("Bad state");
2735 #endif
2736 
2737   __ bind(Done);
2738 
2739   Label notVolatile;
2740   __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2741   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2742   __ bind(notVolatile);
2743 }
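
     // Rewriting performed above (a sketch): after the first execution,
     // patch_bytecode overwrites the generic opcode in the bytecode stream
     // with a type-specialised variant, so later executions skip the whole
     // tos_state dispatch chain, e.g.:
     //
     //   getfield #idx  -->  fast_igetfield #idx    (for an int field)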
2744 
2745 
2746 void TemplateTable::getfield(int byte_no)
2747 {
2748   getfield_or_static(byte_no, false);
2749 }
2750 
2751 void TemplateTable::nofast_getfield(int byte_no) {
2752   getfield_or_static(byte_no, false, may_not_rewrite);
2753 }
2754 
2755 void TemplateTable::getstatic(int byte_no)
2756 {
2757   getfield_or_static(byte_no, true);
2758 }
2759 
2760 // The registers cache and index are expected to be set before the call.
2761 // The function may destroy various registers, just not the cache and index registers.
2762 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2763   transition(vtos, vtos);
2764 
2765   if (JvmtiExport::can_post_field_modification()) {
2766     // Check to see if a field modification watch has been set before
2767     // we take the time to call into the VM.
2768     Label L1;
2769     assert_different_registers(cache, index, r0);
2770     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2771     __ ldrw(r0, Address(rscratch1));
2772     __ cbz(r0, L1);
2773 
2774     __ mov(c_rarg2, cache);
2775 
2776     if (is_static) {
2777       // Life is simple.  Null out the object pointer.
2778       __ mov(c_rarg1, zr);
2779     } else {
2780       // Life is harder. The stack holds the value on top, followed by
2781       // the object.  We don't know the size of the value, though; it
2782       // could be one or two words depending on its type. As a result,
2783       // we must find the type to determine where the object is.
2784       __ load_unsigned_byte(c_rarg3, Address(c_rarg2, in_bytes(ResolvedFieldEntry::type_offset())));
2785       Label nope2, done, ok;
2786       __ ldr(c_rarg1, at_tos_p1());  // initially assume a one-word jvalue
2787       __ cmpw(c_rarg3, ltos);
2788       __ br(Assembler::EQ, ok);
2789       __ cmpw(c_rarg3, dtos);
2790       __ br(Assembler::NE, nope2);
2791       __ bind(ok);
2792       __ ldr(c_rarg1, at_tos_p2()); // ltos (two-word jvalue)
2793       __ bind(nope2);
2794     }
2795     // object (tos)
2796     __ mov(c_rarg3, esp);
2797     // c_rarg1: object pointer set up above (null if static)
2798     // c_rarg2: cache entry pointer
2799     // c_rarg3: jvalue object on the stack
2800     __ call_VM(noreg,
2801                CAST_FROM_FN_PTR(address,
2802                                 InterpreterRuntime::post_field_modification),
2803                c_rarg1, c_rarg2, c_rarg3);
2804     __ load_field_entry(cache, index);
2805     __ bind(L1);
2806   }
2807 }
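
     // Stack shapes distinguished above when locating the object (a sketch;
     // the value, whose size depends on its type, sits on top of it):
     //
     //   one-word value:             two-word (long/double) value:
     //     esp+0 -> value              esp+0  -> value (2 slots)
     //     esp+8 -> object             esp+16 -> object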
2808 
2809 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2810   transition(vtos, vtos);
2811 
2812   const Register cache     = r2;
2813   const Register index     = r3;
2814   const Register tos_state = r3;
2815   const Register obj       = r2;
2816   const Register off       = r19;
2817   const Register flags     = r0;
2818   const Register bc        = r4;
2819 
2820   resolve_cache_and_index_for_field(byte_no, cache, index);
2821   jvmti_post_field_mod(cache, index, is_static);
2822   load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
2823 
2824   Label Done;
2825   __ mov(r5, flags);
2826 
2827   {
2828     Label notVolatile;
2829     __ tbz(r5, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2830     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2831     __ bind(notVolatile);
2832   }
2833 
2834   // field address
2835   const Address field(obj, off);
2836 
2837   Label notByte, notBool, notInt, notShort, notChar,
2838         notLong, notFloat, notObj, notDouble;
2839 
2840   assert(btos == 0, "change code, btos != 0");
2841   __ cbnz(tos_state, notByte);
2842 
2843   // Don't rewrite putstatic, only putfield
2844   if (is_static) rc = may_not_rewrite;
2845 
2846   // btos
2847   {
2848     __ pop(btos);
2849     if (!is_static) pop_and_check_object(obj);
2850     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
2851     if (rc == may_rewrite) {
2852       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2853     }
2854     __ b(Done);
2855   }
2856 
2857   __ bind(notByte);
2858   __ cmp(tos_state, (u1)ztos);
2859   __ br(Assembler::NE, notBool);
2860 
2861   // ztos
2862   {
2863     __ pop(ztos);
2864     if (!is_static) pop_and_check_object(obj);
2865     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
2866     if (rc == may_rewrite) {
2867       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2868     }
2869     __ b(Done);
2870   }
2871 
2872   __ bind(notBool);
2873   __ cmp(tos_state, (u1)atos);
2874   __ br(Assembler::NE, notObj);
2875 
2876   // atos
2877   {
2878     __ pop(atos);
2879     if (!is_static) pop_and_check_object(obj);
2880     // Store into the field
2881     do_oop_store(_masm, field, r0, IN_HEAP);
2882     if (rc == may_rewrite) {
2883       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2884     }
2885     __ b(Done);
2886   }
2887 
2888   __ bind(notObj);
2889   __ cmp(tos_state, (u1)itos);
2890   __ br(Assembler::NE, notInt);
2891 
2892   // itos
2893   {
2894     __ pop(itos);
2895     if (!is_static) pop_and_check_object(obj);
2896     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
2897     if (rc == may_rewrite) {
2898       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2899     }
2900     __ b(Done);
2901   }
2902 
2903   __ bind(notInt);
2904   __ cmp(tos_state, (u1)ctos);
2905   __ br(Assembler::NE, notChar);
2906 
2907   // ctos
2908   {
2909     __ pop(ctos);
2910     if (!is_static) pop_and_check_object(obj);
2911     __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
2912     if (rc == may_rewrite) {
2913       patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
2914     }
2915     __ b(Done);
2916   }
2917 
2918   __ bind(notChar);
2919   __ cmp(tos_state, (u1)stos);
2920   __ br(Assembler::NE, notShort);
2921 
2922   // stos
2923   {
2924     __ pop(stos);
2925     if (!is_static) pop_and_check_object(obj);
2926     __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
2927     if (rc == may_rewrite) {
2928       patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
2929     }
2930     __ b(Done);
2931   }
2932 
2933   __ bind(notShort);
2934   __ cmp(tos_state, (u1)ltos);
2935   __ br(Assembler::NE, notLong);
2936 
2937   // ltos
2938   {
2939     __ pop(ltos);
2940     if (!is_static) pop_and_check_object(obj);
2941     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
2942     if (rc == may_rewrite) {
2943       patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
2944     }
2945     __ b(Done);
2946   }
2947 
2948   __ bind(notLong);
2949   __ cmp(tos_state, (u1)ftos);
2950   __ br(Assembler::NE, notFloat);
2951 
2952   // ftos
2953   {
2954     __ pop(ftos);
2955     if (!is_static) pop_and_check_object(obj);
2956     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
2957     if (rc == may_rewrite) {
2958       patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
2959     }
2960     __ b(Done);
2961   }
2962 
2963   __ bind(notFloat);
2964 #ifdef ASSERT
2965   __ cmp(tos_state, (u1)dtos);
2966   __ br(Assembler::NE, notDouble);
2967 #endif
2968 
2969   // dtos
2970   {
2971     __ pop(dtos);
2972     if (!is_static) pop_and_check_object(obj);
2973     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
2974     if (rc == may_rewrite) {
2975       patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
2976     }
2977   }
2978 
2979 #ifdef ASSERT
2980   __ b(Done);
2981 
2982   __ bind(notDouble);
2983   __ stop("Bad state");
2984 #endif
2985 
2986   __ bind(Done);
2987 
2988   {
2989     Label notVolatile;
2990     __ tbz(r5, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2991     __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
2992     __ bind(notVolatile);
2993   }
2994 }
2995 
2996 void TemplateTable::putfield(int byte_no)
2997 {
2998   putfield_or_static(byte_no, false);
2999 }
3000 
3001 void TemplateTable::nofast_putfield(int byte_no) {
3002   putfield_or_static(byte_no, false, may_not_rewrite);
3003 }
3004 
3005 void TemplateTable::putstatic(int byte_no) {
3006   putfield_or_static(byte_no, true);
3007 }
3008 
3009 void TemplateTable::jvmti_post_fast_field_mod() {
3010   if (JvmtiExport::can_post_field_modification()) {
3011     // Check to see if a field modification watch has been set before
3012     // we take the time to call into the VM.
3013     Label L2;
3014     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3015     __ ldrw(c_rarg3, Address(rscratch1));
3016     __ cbzw(c_rarg3, L2);
3017     __ pop_ptr(r19);                  // copy the object pointer from tos
3018     __ verify_oop(r19);
3019     __ push_ptr(r19);                 // put the object pointer back on tos
3020     // Save tos values before call_VM() clobbers them. Since we have
3021     // to do it for every data type, we use the saved values as the
3022     // jvalue object.
3023     switch (bytecode()) {          // load values into the jvalue object
3024     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
3025     case Bytecodes::_fast_bputfield: // fall through
3026     case Bytecodes::_fast_zputfield: // fall through
3027     case Bytecodes::_fast_sputfield: // fall through
3028     case Bytecodes::_fast_cputfield: // fall through
3029     case Bytecodes::_fast_iputfield: __ push_i(r0); break;
3030     case Bytecodes::_fast_dputfield: __ push_d(); break;
3031     case Bytecodes::_fast_fputfield: __ push_f(); break;
3032     case Bytecodes::_fast_lputfield: __ push_l(r0); break;
3033 
3034     default:
3035       ShouldNotReachHere();
3036     }
3037     __ mov(c_rarg3, esp);             // points to jvalue on the stack
3038     // access constant pool cache entry
3039     __ load_field_entry(c_rarg2, r0);
3040     __ verify_oop(r19);
3041     // r19: object pointer copied above
3042     // c_rarg2: cache entry pointer
3043     // c_rarg3: jvalue object on the stack
3044     __ call_VM(noreg,
3045                CAST_FROM_FN_PTR(address,
3046                                 InterpreterRuntime::post_field_modification),
3047                r19, c_rarg2, c_rarg3);
3048 
3049     switch (bytecode()) {             // restore tos values
3050     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
3051     case Bytecodes::_fast_bputfield: // fall through
3052     case Bytecodes::_fast_zputfield: // fall through
3053     case Bytecodes::_fast_sputfield: // fall through
3054     case Bytecodes::_fast_cputfield: // fall through
3055     case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
3056     case Bytecodes::_fast_dputfield: __ pop_d(); break;
3057     case Bytecodes::_fast_fputfield: __ pop_f(); break;
3058     case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
3059     default: break;
3060     }
3061     __ bind(L2);
3062   }
3063 }
3064 
3065 void TemplateTable::fast_storefield(TosState state)
3066 {
3067   transition(state, vtos);
3068 
3069   ByteSize base = ConstantPoolCache::base_offset();
3070 
3071   jvmti_post_fast_field_mod();
3072 
3073   // access constant pool cache
3074   __ load_field_entry(r2, r1);
3075 
3076   // r1: field offset, r2: field holder, r3: flags
3077   load_resolved_field_entry(r2, r2, noreg, r1, r3);
3078 
3079   {
3080     Label notVolatile;
3081     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3082     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
3083     __ bind(notVolatile);
3084   }
3085 
3088   // Get object from stack
3089   pop_and_check_object(r2);
3090 
3091   // field address
3092   const Address field(r2, r1);
3093 
3094   // access field
3095   switch (bytecode()) {
3096   case Bytecodes::_fast_aputfield:
3097     do_oop_store(_masm, field, r0, IN_HEAP);
3098     break;
3099   case Bytecodes::_fast_lputfield:
3100     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
3101     break;
3102   case Bytecodes::_fast_iputfield:
3103     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
3104     break;
3105   case Bytecodes::_fast_zputfield:
3106     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
3107     break;
3108   case Bytecodes::_fast_bputfield:
3109     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
3110     break;
3111   case Bytecodes::_fast_sputfield:
3112     __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
3113     break;
3114   case Bytecodes::_fast_cputfield:
3115     __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
3116     break;
3117   case Bytecodes::_fast_fputfield:
3118     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3119     break;
3120   case Bytecodes::_fast_dputfield:
3121     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
3122     break;
3123   default:
3124     ShouldNotReachHere();
3125   }
3126 
3127   {
3128     Label notVolatile;
3129     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3130     __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
3131     __ bind(notVolatile);
3132   }
3133 }
3134 
3135 
3136 void TemplateTable::fast_accessfield(TosState state)
3137 {
3138   transition(atos, state);
3139   // Do the JVMTI work here to avoid disturbing the register state below
3140   if (JvmtiExport::can_post_field_access()) {
3141     // Check to see if a field access watch has been set before we
3142     // take the time to call into the VM.
3143     Label L1;
3144     __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3145     __ ldrw(r2, Address(rscratch1));
3146     __ cbzw(r2, L1);
3147     // access constant pool cache entry
3148     __ load_field_entry(c_rarg2, rscratch2);
3149     __ verify_oop(r0);
3150     __ push_ptr(r0);  // save object pointer before call_VM() clobbers it
3151     __ mov(c_rarg1, r0);
3152     // c_rarg1: object pointer copied above
3153     // c_rarg2: cache entry pointer
3154     __ call_VM(noreg,
3155                CAST_FROM_FN_PTR(address,
3156                                 InterpreterRuntime::post_field_access),
3157                c_rarg1, c_rarg2);
3158     __ pop_ptr(r0); // restore object pointer
3159     __ bind(L1);
3160   }
3161 
3162   // access constant pool cache
3163   __ load_field_entry(r2, r1);
3164 
3165   __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3166   __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
3167 
3168   // r0: object
3169   __ verify_oop(r0);
3170   __ null_check(r0);
3171   const Address field(r0, r1);
3172 
3173   // 8179954: We need to make sure that the code generated for
3174   // volatile accesses forms a sequentially-consistent set of
3175   // operations when combined with STLR and LDAR.  Without a leading
3176   // membar it's possible for a simple Dekker test to fail if loads
3177   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3178   // the stores in one method and we interpret the loads in another.
3179   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3180     Label notVolatile;
3181     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3182     __ membar(MacroAssembler::AnyAny);
3183     __ bind(notVolatile);
3184   }
3185 
3186   // access field
3187   switch (bytecode()) {
3188   case Bytecodes::_fast_agetfield:
3189     do_oop_load(_masm, field, r0, IN_HEAP);
3190     __ verify_oop(r0);
3191     break;
3192   case Bytecodes::_fast_lgetfield:
3193     __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
3194     break;
3195   case Bytecodes::_fast_igetfield:
3196     __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
3197     break;
3198   case Bytecodes::_fast_bgetfield:
3199     __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
3200     break;
3201   case Bytecodes::_fast_sgetfield:
3202     __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
3203     break;
3204   case Bytecodes::_fast_cgetfield:
3205     __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
3206     break;
3207   case Bytecodes::_fast_fgetfield:
3208     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3209     break;
3210   case Bytecodes::_fast_dgetfield:
3211     __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3212     break;
3213   default:
3214     ShouldNotReachHere();
3215   }
3216   {
3217     Label notVolatile;
3218     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3219     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3220     __ bind(notVolatile);
3221   }
3222 }
3223 
3224 void TemplateTable::fast_xaccess(TosState state)
3225 {
3226   transition(vtos, state);
3227 
3228   // get receiver
3229   __ ldr(r0, aaddress(0));
3230   // access constant pool cache
3231   __ load_field_entry(r2, r3, 2);
3232   __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3233 
3234   // 8179954: We need to make sure that the code generated for
3235   // volatile accesses forms a sequentially-consistent set of
3236   // operations when combined with STLR and LDAR.  Without a leading
3237   // membar it's possible for a simple Dekker test to fail if loads
3238   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3239   // the stores in one method and we interpret the loads in another.
3240   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3241     Label notVolatile;
3242     __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
3243     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3244     __ membar(MacroAssembler::AnyAny);
3245     __ bind(notVolatile);
3246   }
3247 
3248   // make sure exception is reported in correct bcp range (getfield is
3249   // next instruction)
3250   __ increment(rbcp);
3251   __ null_check(r0);
3252   switch (state) {
3253   case itos:
3254     __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3255     break;
3256   case atos:
3257     do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
3258     __ verify_oop(r0);
3259     break;
3260   case ftos:
3261     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3262     break;
3263   default:
3264     ShouldNotReachHere();
3265   }
3266 
3267   {
3268     Label notVolatile;
3269     __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
3270     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3271     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3272     __ bind(notVolatile);
3273   }
3274 
3275   __ decrement(rbcp);
3276 }
3277 
3278 
3279 
3280 //-----------------------------------------------------------------------------
3281 // Calls
3282 
void TemplateTable::prepare_invoke(Register cache, Register recv) {

  Bytecodes::Code code = bytecode();
  const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);

  // save 'interpreter return address'
  __ save_bcp();

  // Load TOS state for later
  __ load_unsigned_byte(rscratch2, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));

  // load receiver if needed (note: no return address pushed yet)
  if (load_receiver) {
    __ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
    __ add(rscratch1, esp, recv, ext::uxtx, 3);
    __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
    __ verify_oop(recv);
  }

  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    __ mov(rscratch1, table_addr);
    __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
  }
}

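// invokevirtual_helper dispatches either directly (final methods) or via
// the receiver's vtable. In ResolvedMethodEntry terms: if is_vfinal is set
// the entry already caches the Method*, so no table lookup is needed;
// otherwise it caches a vtable index and the target is fetched from the
// receiver's klass. Roughly:
//
//   if (entry->is_vfinal())  call entry->method();              // direct
//   else                     call recv->klass()->vtable[index]; // virtual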
void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags)
{
  // Uses temporary registers r0, r3
  assert_different_registers(index, recv, r0, r3);
  // Test for an invoke of a final method
  Label notFinal;
  __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, notFinal);

  const Register method = index;  // method must be rmethod
  assert(method == rmethod,
         "Method must be rmethod for interpreter calling convention");

  // do the call - the index is actually the method to call
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*

  // It's final, need a null check here!
  __ null_check(recv);

  // profile this call
  __ profile_final_call(r0);
  __ profile_arguments_type(r0, method, r4, true);

  __ jump_from_interpreted(method, r0);

  __ bind(notFinal);

  // get receiver klass
  __ load_klass(r0, recv);

  // profile this call
  __ profile_virtual_call(r0, rlocals, r3);

  // get target Method & entry point
  __ lookup_virtual_method(r0, index, method);
  __ profile_arguments_type(r3, method, r4, true);
  // FIXME -- this looks completely redundant. is it?
  // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
  __ jump_from_interpreted(method, r3);
}

void TemplateTable::invokevirtual(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  load_resolved_method_entry_virtual(r2,      // ResolvedMethodEntry*
                                     rmethod, // Method* or vtable index
                                     r3);     // flags
  prepare_invoke(r2, r2); // recv

  // rmethod: index (actually a Method*)
  // r2: receiver
  // r3: flags

  invokevirtual_helper(rmethod, r2, r3);
}

void TemplateTable::invokespecial(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_special_or_static(r2,      // ResolvedMethodEntry*
                                               rmethod, // Method*
                                               r3);     // flags
  prepare_invoke(r2, r2);  // get receiver also for null check
  __ verify_oop(r2);
  __ null_check(r2);
  // do the call
  __ profile_call(r0);
  __ profile_arguments_type(r0, rmethod, rbcp, false);
  __ jump_from_interpreted(rmethod, r0);
}

void TemplateTable::invokestatic(int byte_no)
{
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_special_or_static(r2,      // ResolvedMethodEntry*
                                               rmethod, // Method*
                                               r3);     // flags
  prepare_invoke(r2, r2);  // static call: no receiver is loaded

  // do the call
  __ profile_call(r0);
  __ profile_arguments_type(r0, rmethod, r4, false);
  __ jump_from_interpreted(rmethod, r0);
}

void TemplateTable::fast_invokevfinal(int byte_no)
{
  __ call_Unimplemented();
}

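// invokeinterface dispatches in three tiers, checked in this order:
//   1. methods inherited from java.lang.Object (is_forced_virtual) go
//      through the normal virtual path;
//   2. private interface methods (is_vfinal) only need a receiver subtype
//      check against the interface, then a direct call;
//   3. everything else does two itable passes: one against the REFC
//      (reference class) purely as a subtype check, then one against the
//      declaring interface to fetch the Method*.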
void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_interface(r2,      // ResolvedMethodEntry*
                                       r0,      // Klass*
                                       rmethod, // Method* or itable/vtable index
                                       r3);     // flags
  prepare_invoke(r2, r2); // receiver

  // r0: interface klass (from f1)
  // rmethod: method (from f2)
  // r2: receiver
  // r3: flags

  // First check for Object case, then private interface method,
  // then regular interface method.

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCache.cpp for details.
  Label notObjectMethod;
  __ tbz(r3, ResolvedMethodEntry::is_forced_virtual_shift, notObjectMethod);

  invokevirtual_helper(rmethod, r2, r3);
  __ bind(notObjectMethod);

  Label no_such_interface;

  // Check for private method invocation - indicated by vfinal
  Label notVFinal;
  __ tbz(r3, ResolvedMethodEntry::is_vfinal_shift, notVFinal);

  // Get receiver klass into r3
  __ load_klass(r3, r2);

  Label subtype;
  __ check_klass_subtype(r3, r0, r4, subtype);
  // If we get here the typecheck failed
  __ b(no_such_interface);
  __ bind(subtype);

  __ profile_final_call(r0);
  __ profile_arguments_type(r0, rmethod, r4, true);
  __ jump_from_interpreted(rmethod, r0);

  __ bind(notVFinal);

  // Get receiver klass into r3
  __ restore_locals();
  __ load_klass(r3, r2);

  Label no_such_method;

  // Preserve method for throw_AbstractMethodErrorVerbose.
  __ mov(r16, rmethod);
  // Receiver subtype check against REFC.
  // Superklass in r0. Subklass in r3. Blows rscratch2, r13
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             r3, r0, noreg,
                             // outputs: scan temp. reg, scan temp. reg
                             rscratch2, r13,
                             no_such_interface,
                             /*return_method=*/false);

  // profile this call
  __ profile_virtual_call(r3, r13, r19);

  // Get declaring interface class from method, and itable index

  __ load_method_holder(r0, rmethod);
  __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
  __ subw(rmethod, rmethod, Method::itable_index_max);
  __ negw(rmethod, rmethod);
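  // The field read above holds the itable index in encoded form; the
  // subw/negw pair recovers the real index as
  //   index = Method::itable_index_max - encoded
  // matching the encoding used by Method (itable indices grow downward
  // from itable_index_max).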

  // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
  __ mov(rlocals, r3);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rlocals, r0, rmethod,
                             // outputs: method, scan temp. reg
                             rmethod, r13,
                             no_such_interface);

  // rmethod: Method to call
  // r2: receiver
  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
  //       interpreter entry point and a conditional jump to it in case of a null
  //       method.
  __ cbz(rmethod, no_such_method);

  __ profile_arguments_type(r3, rmethod, r13, true);

  // do the call
  // r2: receiver
  // rmethod: Method
  __ jump_from_interpreted(rmethod, r3);
  __ should_not_reach_here();

  // exception handling code follows...
  // note: must restore interpreter registers to canonical
  //       state for exception handling to work correctly!

  __ bind(no_such_method);
  // throw exception
  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);
  // throw exception
  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
}

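// invokehandle implements the polymorphic MethodHandle invoke bytecodes.
// load_resolved_method_entry_handle leaves the adapter Method* in rmethod
// and the resolved reference in r0; the receiver (the MethodHandle itself)
// is null-checked like any other virtual call.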
void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_resolved_method_entry_handle(r2,      // ResolvedMethodEntry*
                                    rmethod, // Method*
                                    r0,      // Resolved reference
                                    r3);     // flags
  prepare_invoke(r2, r2);

  __ verify_method_ptr(r2);
  __ verify_oop(r2);
  __ null_check(r2);

  // FIXME: profile the LambdaForm also

  // r13 is safe to use here as a scratch reg because it is about to
  // be clobbered by jump_from_interpreted().
  __ profile_final_call(r13);
  __ profile_arguments_type(r13, rmethod, r4, true);

  __ jump_from_interpreted(rmethod, r0);
}

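// invokedynamic calls also go through a MethodHandle linker:
// load_invokedynamic_entry resolves the call site (or finds it already
// resolved), pushes the resolved reference, and leaves the CallSite object
// in r0 with the MH.linkToCallSite adapter in rmethod.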
void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  load_invokedynamic_entry(rmethod);

  // r0: CallSite object (from cpool->resolved_references[])
  // rmethod: MH.linkToCallSite method

  // Note: the CallSite object in r0 has already been pushed

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(rbcp);
  __ profile_arguments_type(r3, rmethod, r13, false);

  __ verify_oop(r0);

  __ jump_from_interpreted(rmethod, r0);
}


//-----------------------------------------------------------------------------
// Allocation

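// _new allocates an instance entirely in assembly on the fast path,
// falling back to InterpreterRuntime::_new otherwise. Roughly:
//
//   if (cp_tag(index) != JVM_CONSTANT_Class) goto slow;  // not resolved yet
//   klass = cp->resolved_klass(index);
//   if (!klass->is_initialized()) goto slow;             // clinit barrier
//   size = klass->layout_helper();                       // slow-path bit => slow
//   obj  = TLAB bump-allocate(size), or goto slow;
//   zero body; write mark word, klass gap, klass;        // header written last
//
// The tag is read with ldarb (load-acquire) so a concurrently published
// resolved klass is observed consistently.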
void TemplateTable::_new() {
  transition(vtos, atos);

  __ get_unsigned_2_byte_index_at_bcp(r3, 1);
  Label slow_case;
  Label done;
  Label initialize_header;

  __ get_cpool_and_tags(r4, r0);
  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the
  // order in which the constant pool is updated (see ConstantPool::klass_at_put)
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
  __ lea(rscratch1, Address(rscratch1, tags_offset));
  __ ldarb(rscratch1, rscratch1);
  __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, slow_case);

  // get InstanceKlass
  __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);

  // make sure klass is initialized
  assert(VM_Version::supports_fast_class_init_checks(), "Optimization requires support for fast class initialization checks");
  __ clinit_barrier(r4, rscratch1, nullptr /*L_fast_path*/, &slow_case);

  // get instance_size in InstanceKlass (scaled to a count of bytes)
  __ ldrw(r3,
          Address(r4,
                  Klass::layout_helper_offset()));
  // test to see if it is malformed in some way
  __ tbnz(r3, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If fails, go to the slow path.
  //    Initialize the allocation.
  //    Exit.
  //
  //  Go to slow path.

  if (UseTLAB) {
    __ tlab_allocate(r0, r3, 0, noreg, r1, slow_case);

    if (ZeroTLAB) {
      // the fields have already been cleared
      __ b(initialize_header);
    }

    // The object is initialized before the header.  If the object size is
    // zero, go directly to the header initialization.
    __ sub(r3, r3, sizeof(oopDesc));
    __ cbz(r3, initialize_header);

    // Initialize object fields
    {
      __ add(r2, r0, sizeof(oopDesc));
      Label loop;
      __ bind(loop);
      __ str(zr, Address(__ post(r2, BytesPerLong)));
      __ sub(r3, r3, BytesPerLong);
      __ cbnz(r3, loop);
    }

    // initialize object header only.
    __ bind(initialize_header);
    __ mov(rscratch1, (intptr_t)markWord::prototype().value());
    __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
    __ store_klass_gap(r0, zr);  // zero klass gap for compressed oops
    __ store_klass(r0, r4);      // store klass last

    if (DTraceAllocProbes) {
      // Trigger dtrace event for fastpath
      __ push(atos); // save the return value
      __ call_VM_leaf(
           CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), r0);
      __ pop(atos); // restore the return value
    }
    __ b(done);
  }

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(c_rarg1);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
  __ verify_oop(r0);

  // continue
  __ bind(done);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::newarray() {
  transition(itos, atos);
  __ load_unsigned_byte(c_rarg1, at_bcp(1));
  __ mov(c_rarg2, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          c_rarg1, c_rarg2);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  __ mov(c_rarg3, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          c_rarg1, c_rarg2, c_rarg3);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);
  __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
}

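// checkcast and instanceof share their shape: if the constant-pool tag
// says the class is already resolved ("quicked"), the Klass* is loaded
// straight from the constant pool; otherwise quicken_io_cc resolves it,
// preserving the receiver across the VM call. For example, for
//
//   String s = (String) obj;
//
// the subtype check runs obj's klass against the resolved String klass
// and branches to the ClassCastException entry on failure.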
void TemplateTable::checkcast()
{
  transition(atos, atos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quicked
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(r0, rthread);
  __ pop(r3); // restore receiver
  __ b(resolved);

  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass

  __ bind(resolved);
  __ load_klass(r19, r3);

  // Generate subtype check.  Blows r2, r5.  Object in r3.
  // Superklass in r0.  Subklass in r19.
  __ gen_subtype_check(r19, ok_is_subtype);

  // Come here on failure
  __ push(r3);
  // object is at TOS
  __ b(Interpreter::_throw_ClassCastException_entry);

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, r3); // Restore object from r3

  // Collect counts on whether this test sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(r2);
  } else {
    __ bind(is_null);   // same as 'done'
  }
  __ bind(done);
}

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if bytecode has already been quicked
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(r0, rthread);
  __ pop(r3); // restore receiver
  __ verify_oop(r3);
  __ load_klass(r3, r3);
  __ b(resolved);

  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ load_klass(r3, r0);
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);

  __ bind(resolved);

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r3.
  __ gen_subtype_check(r3, ok_is_subtype);

  // Come here on failure
  __ mov(r0, 0);
  __ b(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, 1);

  // Collect counts on whether this test sees nulls a lot or not.
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(r2);
  } else {
    __ bind(is_null);   // same as 'done'
  }
  __ bind(done);
  // r0 = 0: obj == nullptr or  obj is not an instanceof the specified klass
  // r0 = 1: obj != nullptr and obj is     an instanceof the specified klass
}

//-----------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping...
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  // get the unpatched byte code
  __ get_method(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::get_original_bytecode_at),
             c_rarg1, rbcp);
  __ mov(r19, r0);

  // post the breakpoint event
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             rmethod, rbcp);

  // complete the execution of original bytecode
  __ mov(rscratch1, r19);
  __ dispatch_only_normal(vtos);
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(r0);
  __ b(Interpreter::throw_exception_entry());
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
//       in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- esp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rfp    ] <--- rfp
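//
// Note: monitor block top/bot are stored in the frame as offsets
// relativized to rfp (in stack-element units), which is why each load
// below is followed by a lea that turns the saved offset back into an
// absolute address.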
void TemplateTable::monitorenter()
{
  transition(atos, vtos);

  // check for null object
  __ null_check(r0);

  const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  Label allocated;

  // initialize entry pointer
  __ mov(c_rarg1, zr); // points to free slot or null

  // find a free slot in the monitor block (result in c_rarg1)
  {
    Label entry, loop, exit;
    __ ldr(c_rarg3, monitor_block_top); // derelativize pointer
    __ lea(c_rarg3, Address(rfp, c_rarg3, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg3 points to current entry, starting with top-most entry

    __ lea(c_rarg2, monitor_block_bot); // points to word before bottom

    __ b(entry);

    __ bind(loop);
    // check if current entry is used
    // if not used then remember entry in c_rarg1
    __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset()));
    __ cmp(zr, rscratch1);
    __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
    // check if current entry is for same object
    __ cmp(r0, rscratch1);
    // if same object then stop searching
    __ br(Assembler::EQ, exit);
    // otherwise advance to next entry
    __ add(c_rarg3, c_rarg3, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmp(c_rarg3, c_rarg2);
    // if not at bottom then check this entry
    __ br(Assembler::NE, loop);
    __ bind(exit);
  }

  __ cbnz(c_rarg1, allocated); // check if a slot has been found;
                               // if found, continue with that one

  // allocate one if there's no free slot
  {
    Label entry, loop;
    // 1. compute new pointers            // rsp: old expression stack top

    __ check_extended_sp();
    __ sub(sp, sp, entry_size);           // make room for the monitor
    __ sub(rscratch1, sp, rfp);
    __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
    __ str(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));

    __ ldr(c_rarg1, monitor_block_bot);   // derelativize pointer
    __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to the old expression stack bottom

    __ sub(esp, esp, entry_size);         // move expression stack top
    __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
    __ mov(c_rarg3, esp);                 // set start value for copy loop
    __ sub(rscratch1, c_rarg1, rfp);      // relativize pointer
    __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
    __ str(rscratch1, monitor_block_bot); // set new monitor block bottom

    __ b(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
                                                   // word from old location
    __ str(c_rarg2, Address(c_rarg3, 0));          // and store it at new location
    __ add(c_rarg3, c_rarg3, wordSize);            // advance to next word
    __ bind(entry);
    __ cmp(c_rarg3, c_rarg1);        // check if bottom reached
    __ br(Assembler::NE, loop);      // if not at bottom then
                                     // copy next word
  }

  // call run-time routine
  // c_rarg1: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(rbcp);

  // store object
  __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset()));
  __ lock_object(c_rarg1);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp();  // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit()
{
  transition(atos, vtos);

  // check for null object
  __ null_check(r0);

  const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();

  Label found;

  // find matching slot
  {
    Label entry, loop;
    __ ldr(c_rarg1, monitor_block_top); // derelativize pointer
    __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
    // c_rarg1 points to current entry, starting with top-most entry

    __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
                                        // of monitor block
    __ b(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset()));
    __ cmp(r0, rscratch1);
    // if same object then stop searching
    __ br(Assembler::EQ, found);
    // otherwise advance to next entry
    __ add(c_rarg1, c_rarg1, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmp(c_rarg1, c_rarg2);
    // if not at bottom then check this entry
    __ br(Assembler::NE, loop);
  }

  // Error handling: unlocking was not block-structured
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  __ bind(found);
  __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
  __ unlock_object(c_rarg1);
  __ pop_ptr(r0); // discard object
}


// Wide instructions
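// The wide prefix re-dispatches on the following opcode through a separate
// table of wide entry points, so e.g. "wide iload 300" lands on the
// wide-index variant of iload.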
void TemplateTable::wide()
{
  __ load_unsigned_byte(r19, at_bcp(1));
  __ mov(rscratch1, (address)Interpreter::_wentry_point);
  __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
  __ br(rscratch1);
}


// Multi arrays
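// multianewarray's dimension counts sit on the expression stack, last
// dimension on top. For "new int[2][3]" the stack holds 3 (top) then 2,
// the ndims byte at bcp+3 is 2, and c_rarg1 below ends up pointing at the
// slot holding the first dimension. After the call, the ndims slots are
// popped by bumping esp.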
void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  // first_addr = last_addr + (ndims - 1) * wordSize
  __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
  __ sub(c_rarg1, c_rarg1, wordSize);
  call_VM(r0,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
          c_rarg1);
  __ load_unsigned_byte(r1, at_bcp(3));
  __ lea(esp, Address(esp, r1, Address::uxtw(3)));
}