1 /*
   2  * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "compiler/disassembler.hpp"
  28 #include "compiler/compilerDefinitions.inline.hpp"
  29 #include "gc/shared/barrierSetAssembler.hpp"
  30 #include "gc/shared/collectedHeap.hpp"
  31 #include "gc/shared/tlab_globals.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "interpreter/interp_masm.hpp"
  35 #include "interpreter/templateTable.hpp"
  36 #include "memory/universe.hpp"
  37 #include "oops/methodData.hpp"
  38 #include "oops/method.inline.hpp"
  39 #include "oops/objArrayKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "oops/resolvedFieldEntry.hpp"
  42 #include "oops/resolvedIndyEntry.hpp"
  43 #include "oops/resolvedMethodEntry.hpp"
  44 #include "prims/jvmtiExport.hpp"
  45 #include "prims/methodHandles.hpp"
  46 #include "runtime/frame.inline.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "runtime/stubRoutines.hpp"
  49 #include "runtime/synchronizer.hpp"
  50 #include "utilities/powerOfTwo.hpp"
  51 
  52 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
  53 
  54 // Address computation: local variables
  55 
  56 static inline Address iaddress(int n) {
  57   return Address(rlocals, Interpreter::local_offset_in_bytes(n));
  58 }
  59 
  60 static inline Address laddress(int n) {
  61   return iaddress(n + 1);
  62 }
  63 
  64 static inline Address faddress(int n) {
  65   return iaddress(n);
  66 }
  67 
  68 static inline Address daddress(int n) {
  69   return laddress(n);
  70 }
  71 
  72 static inline Address aaddress(int n) {
  73   return iaddress(n);
  74 }
  75 
  76 static inline Address iaddress(Register r) {
  77   return Address(rlocals, r, Address::lsl(3));
  78 }
  79 
  80 static inline Address laddress(Register r, Register scratch,
  81                                InterpreterMacroAssembler* _masm) {
  82   __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
  83   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  84 }
  85 
  86 static inline Address faddress(Register r) {
  87   return iaddress(r);
  88 }
  89 
  90 static inline Address daddress(Register r, Register scratch,
  91                                InterpreterMacroAssembler* _masm) {
  92   return laddress(r, scratch, _masm);
  93 }
  94 
  95 static inline Address aaddress(Register r) {
  96   return iaddress(r);
  97 }
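      // n.b. locals grow towards lower addresses from rlocals, which is why
      // locals_index() negates the index before it is used with the register
      // forms above.  A category-2 value (long/double) occupies local slots n
      // and n+1 and is addressed through the higher-numbered slot, hence
      // laddress(n) == iaddress(n + 1) and the extra local_offset_in_bytes(1)
      // in the register form.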
  98 
  99 static inline Address at_rsp() {
 100   return Address(esp, 0);
 101 }
 102 
  103 // At top of Java expression stack, which may be different from esp().  It
  104 // isn't for category 1 objects.
 105 static inline Address at_tos   () {
 106   return Address(esp,  Interpreter::expr_offset_in_bytes(0));
 107 }
 108 
 109 static inline Address at_tos_p1() {
 110   return Address(esp,  Interpreter::expr_offset_in_bytes(1));
 111 }
 112 
 113 static inline Address at_tos_p2() {
 114   return Address(esp,  Interpreter::expr_offset_in_bytes(2));
 115 }
 116 
 117 static inline Address at_tos_p3() {
 118   return Address(esp,  Interpreter::expr_offset_in_bytes(3));
 119 }
 120 
 121 static inline Address at_tos_p4() {
 122   return Address(esp,  Interpreter::expr_offset_in_bytes(4));
 123 }
 124 
 125 static inline Address at_tos_p5() {
 126   return Address(esp,  Interpreter::expr_offset_in_bytes(5));
 127 }
 128 
 129 // Condition conversion
 130 static Assembler::Condition j_not(TemplateTable::Condition cc) {
 131   switch (cc) {
 132   case TemplateTable::equal        : return Assembler::NE;
 133   case TemplateTable::not_equal    : return Assembler::EQ;
 134   case TemplateTable::less         : return Assembler::GE;
 135   case TemplateTable::less_equal   : return Assembler::GT;
 136   case TemplateTable::greater      : return Assembler::LE;
 137   case TemplateTable::greater_equal: return Assembler::LT;
 138   }
 139   ShouldNotReachHere();
 140   return Assembler::EQ;
 141 }
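      // Illustrative use: a conditional-branch template compares its operands
      // and then skips the taken-branch code using the inverted condition, so
      // e.g. if_icmpeq tests with j_not(equal) == Assembler::NE and falls
      // through to the not-taken path.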
 142 
 143 
 144 // Miscellaneous helper routines
 145 // Store an oop (or null) at the Address described by obj.
 146 // If val == noreg this means store a null
 147 static void do_oop_store(InterpreterMacroAssembler* _masm,
 148                          Address dst,
 149                          Register val,
 150                          DecoratorSet decorators) {
 151   assert(val == noreg || val == r0, "parameter is just for looks");
 152   __ store_heap_oop(dst, val, r10, r11, r3, decorators);
 153 }
 154 
 155 static void do_oop_load(InterpreterMacroAssembler* _masm,
 156                         Address src,
 157                         Register dst,
 158                         DecoratorSet decorators) {
 159   __ load_heap_oop(dst, src, r10, r11, decorators);
 160 }
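      // Both helpers route the access through the current GC barrier set via
      // store_heap_oop/load_heap_oop; the extra registers passed in are just
      // scratch registers the barrier code may clobber.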
 161 
 162 Address TemplateTable::at_bcp(int offset) {
 163   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 164   return Address(rbcp, offset);
 165 }
 166 
 167 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 168                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 169                                    int byte_no)
 170 {
 171   if (!RewriteBytecodes)  return;
 172   Label L_patch_done;
 173 
 174   switch (bc) {
 175   case Bytecodes::_fast_vputfield:
 176   case Bytecodes::_fast_aputfield:
 177   case Bytecodes::_fast_bputfield:
 178   case Bytecodes::_fast_zputfield:
 179   case Bytecodes::_fast_cputfield:
 180   case Bytecodes::_fast_dputfield:
 181   case Bytecodes::_fast_fputfield:
 182   case Bytecodes::_fast_iputfield:
 183   case Bytecodes::_fast_lputfield:
 184   case Bytecodes::_fast_sputfield:
 185     {
 186       // We skip bytecode quickening for putfield instructions when
 187       // the put_code written to the constant pool cache is zero.
 188       // This is required so that every execution of this instruction
 189       // calls out to InterpreterRuntime::resolve_get_put to do
 190       // additional, required work.
 191       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 192       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 193       __ load_field_entry(temp_reg, bc_reg);
 194       if (byte_no == f1_byte) {
 195         __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset())));
 196       } else {
 197         __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset())));
 198       }
 199       // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
 200       __ ldarb(temp_reg, temp_reg);
 201       __ movw(bc_reg, bc);
 202       __ cbzw(temp_reg, L_patch_done);  // don't patch
 203     }
 204     break;
 205   default:
 206     assert(byte_no == -1, "sanity");
 207     // the pair bytecodes have already done the load.
 208     if (load_bc_into_bc_reg) {
 209       __ movw(bc_reg, bc);
 210     }
 211   }
 212 
 213   if (JvmtiExport::can_post_breakpoint()) {
 214     Label L_fast_patch;
 215     // if a breakpoint is present we can't rewrite the stream directly
 216     __ load_unsigned_byte(temp_reg, at_bcp(0));
 217     __ cmpw(temp_reg, Bytecodes::_breakpoint);
 218     __ br(Assembler::NE, L_fast_patch);
 219     // Let breakpoint table handling rewrite to quicker bytecode
 220     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
 221     __ b(L_patch_done);
 222     __ bind(L_fast_patch);
 223   }
 224 
 225 #ifdef ASSERT
 226   Label L_okay;
 227   __ load_unsigned_byte(temp_reg, at_bcp(0));
 228   __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
 229   __ br(Assembler::EQ, L_okay);
 230   __ cmpw(temp_reg, bc_reg);
 231   __ br(Assembler::EQ, L_okay);
 232   __ stop("patching the wrong bytecode");
 233   __ bind(L_okay);
 234 #endif
 235 
 236   // patch bytecode
 237   __ strb(bc_reg, at_bcp(0));
 238   __ bind(L_patch_done);
 239 }
 240 
 241 
 242 // Individual instructions
 243 
 244 void TemplateTable::nop() {
 245   transition(vtos, vtos);
 246   // nothing to do
 247 }
 248 
 249 void TemplateTable::shouldnotreachhere() {
 250   transition(vtos, vtos);
 251   __ stop("shouldnotreachhere bytecode");
 252 }
 253 
 254 void TemplateTable::aconst_null()
 255 {
 256   transition(vtos, atos);
 257   __ mov(r0, 0);
 258 }
 259 
 260 void TemplateTable::iconst(int value)
 261 {
 262   transition(vtos, itos);
 263   __ mov(r0, value);
 264 }
 265 
  266 void TemplateTable::lconst(int value)
  267 {
        transition(vtos, ltos);
  268   __ mov(r0, value);
  269 }
 270 
 271 void TemplateTable::fconst(int value)
 272 {
 273   transition(vtos, ftos);
 274   switch (value) {
 275   case 0:
 276     __ fmovs(v0, 0.0);
 277     break;
 278   case 1:
 279     __ fmovs(v0, 1.0);
 280     break;
 281   case 2:
 282     __ fmovs(v0, 2.0);
 283     break;
 284   default:
 285     ShouldNotReachHere();
 286     break;
 287   }
 288 }
 289 
 290 void TemplateTable::dconst(int value)
 291 {
 292   transition(vtos, dtos);
 293   switch (value) {
 294   case 0:
 295     __ fmovd(v0, 0.0);
 296     break;
 297   case 1:
 298     __ fmovd(v0, 1.0);
 299     break;
 300   case 2:
 301     __ fmovd(v0, 2.0);
 302     break;
 303   default:
 304     ShouldNotReachHere();
 305     break;
 306   }
 307 }
 308 
 309 void TemplateTable::bipush()
 310 {
 311   transition(vtos, itos);
 312   __ load_signed_byte32(r0, at_bcp(1));
 313 }
 314 
 315 void TemplateTable::sipush()
 316 {
 317   transition(vtos, itos);
 318   __ load_unsigned_short(r0, at_bcp(1));
 319   __ revw(r0, r0);
 320   __ asrw(r0, r0, 16);
 321 }
 322 
 323 void TemplateTable::ldc(LdcType type)
 324 {
 325   transition(vtos, vtos);
 326   Label call_ldc, notFloat, notClass, notInt, Done;
 327 
 328   if (is_ldc_wide(type)) {
 329     __ get_unsigned_2_byte_index_at_bcp(r1, 1);
 330   } else {
 331     __ load_unsigned_byte(r1, at_bcp(1));
 332   }
 333   __ get_cpool_and_tags(r2, r0);
 334 
 335   const int base_offset = ConstantPool::header_size() * wordSize;
 336   const int tags_offset = Array<u1>::base_offset_in_bytes();
 337 
 338   // get type
 339   __ add(r3, r1, tags_offset);
 340   __ lea(r3, Address(r0, r3));
 341   __ ldarb(r3, r3);
 342 
 343   // unresolved class - get the resolved class
 344   __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
 345   __ br(Assembler::EQ, call_ldc);
 346 
 347   // unresolved class in error state - call into runtime to throw the error
 348   // from the first resolution attempt
 349   __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
 350   __ br(Assembler::EQ, call_ldc);
 351 
 352   // resolved class - need to call vm to get java mirror of the class
 353   __ cmp(r3, (u1)JVM_CONSTANT_Class);
 354   __ br(Assembler::NE, notClass);
 355 
 356   __ bind(call_ldc);
 357   __ mov(c_rarg1, is_ldc_wide(type) ? 1 : 0);
 358   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
 359   __ push_ptr(r0);
 360   __ verify_oop(r0);
 361   __ b(Done);
 362 
 363   __ bind(notClass);
 364   __ cmp(r3, (u1)JVM_CONSTANT_Float);
 365   __ br(Assembler::NE, notFloat);
 366   // ftos
 367   __ adds(r1, r2, r1, Assembler::LSL, 3);
 368   __ ldrs(v0, Address(r1, base_offset));
 369   __ push_f();
 370   __ b(Done);
 371 
 372   __ bind(notFloat);
 373 
 374   __ cmp(r3, (u1)JVM_CONSTANT_Integer);
 375   __ br(Assembler::NE, notInt);
 376 
 377   // itos
 378   __ adds(r1, r2, r1, Assembler::LSL, 3);
 379   __ ldrw(r0, Address(r1, base_offset));
 380   __ push_i(r0);
 381   __ b(Done);
 382 
 383   __ bind(notInt);
 384   condy_helper(Done);
 385 
 386   __ bind(Done);
 387 }
 388 
 389 // Fast path for caching oop constants.
 390 void TemplateTable::fast_aldc(LdcType type)
 391 {
 392   transition(vtos, atos);
 393 
 394   Register result = r0;
 395   Register tmp = r1;
 396   Register rarg = r2;
 397 
 398   int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);
 399 
 400   Label resolved;
 401 
 402   // We are resolved if the resolved reference cache entry contains a
 403   // non-null object (String, MethodType, etc.)
 404   assert_different_registers(result, tmp);
 405   __ get_cache_index_at_bcp(tmp, 1, index_size);
 406   __ load_resolved_reference_at_index(result, tmp);
 407   __ cbnz(result, resolved);
 408 
 409   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 410 
 411   // first time invocation - must resolve first
 412   __ mov(rarg, (int)bytecode());
 413   __ call_VM(result, entry, rarg);
 414 
 415   __ bind(resolved);
 416 
 417   { // Check for the null sentinel.
 418     // If we just called the VM, it already did the mapping for us,
 419     // but it's harmless to retry.
 420     Label notNull;
 421 
 422     // Stash null_sentinel address to get its value later
 423     __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
 424     __ ldr(tmp, Address(rarg));
 425     __ resolve_oop_handle(tmp, r5, rscratch2);
 426     __ cmpoop(result, tmp);
 427     __ br(Assembler::NE, notNull);
 428     __ mov(result, 0);  // null object reference
 429     __ bind(notNull);
 430   }
 431 
 432   if (VerifyOops) {
 433     // Safe to call with 0 result
 434     __ verify_oop(result);
 435   }
 436 }
 437 
 438 void TemplateTable::ldc2_w()
 439 {
 440   transition(vtos, vtos);
 441   Label notDouble, notLong, Done;
 442   __ get_unsigned_2_byte_index_at_bcp(r0, 1);
 443 
 444   __ get_cpool_and_tags(r1, r2);
 445   const int base_offset = ConstantPool::header_size() * wordSize;
 446   const int tags_offset = Array<u1>::base_offset_in_bytes();
 447 
 448   // get type
 449   __ lea(r2, Address(r2, r0, Address::lsl(0)));
 450   __ load_unsigned_byte(r2, Address(r2, tags_offset));
 451   __ cmpw(r2, (int)JVM_CONSTANT_Double);
 452   __ br(Assembler::NE, notDouble);
 453 
 454   // dtos
 455   __ lea (r2, Address(r1, r0, Address::lsl(3)));
 456   __ ldrd(v0, Address(r2, base_offset));
 457   __ push_d();
 458   __ b(Done);
 459 
 460   __ bind(notDouble);
 461   __ cmpw(r2, (int)JVM_CONSTANT_Long);
 462   __ br(Assembler::NE, notLong);
 463 
 464   // ltos
 465   __ lea(r0, Address(r1, r0, Address::lsl(3)));
 466   __ ldr(r0, Address(r0, base_offset));
 467   __ push_l();
 468   __ b(Done);
 469 
 470   __ bind(notLong);
 471   condy_helper(Done);
 472 
 473   __ bind(Done);
 474 }
 475 
 476 void TemplateTable::condy_helper(Label& Done)
 477 {
 478   Register obj = r0;
 479   Register rarg = r1;
 480   Register flags = r2;
 481   Register off = r3;
 482 
 483   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 484 
 485   __ mov(rarg, (int) bytecode());
 486   __ call_VM(obj, entry, rarg);
 487 
 488   __ get_vm_result_2(flags, rthread);
 489 
 490   // VMr = obj = base address to find primitive value to push
 491   // VMr2 = flags = (tos, off) using format of CPCE::_flags
 492   __ mov(off, flags);
 493   __ andw(off, off, ConstantPoolCache::field_index_mask);
 494 
 495   const Address field(obj, off);
 496 
 497   // What sort of thing are we loading?
  498   // x86 uses a shift and mask, or wings it with a shift plus an assert that
  499   // the mask is not needed.  aarch64 just uses a bitfield extract.
 500   __ ubfxw(flags, flags, ConstantPoolCache::tos_state_shift,
 501            ConstantPoolCache::tos_state_bits);
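        // A rough sketch of the decoding above, assuming the (tos, off) flags
        // layout described by ConstantPoolCache:
        //   off = flags & field_index_mask;                                   // byte offset of the value
        //   tos = (flags >> tos_state_shift) & ((1 << tos_state_bits) - 1);   // type to push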
 502 
 503   switch (bytecode()) {
 504     case Bytecodes::_ldc:
 505     case Bytecodes::_ldc_w:
 506       {
 507         // tos in (itos, ftos, stos, btos, ctos, ztos)
 508         Label notInt, notFloat, notShort, notByte, notChar, notBool;
 509         __ cmpw(flags, itos);
 510         __ br(Assembler::NE, notInt);
 511         // itos
 512         __ ldrw(r0, field);
 513         __ push(itos);
 514         __ b(Done);
 515 
 516         __ bind(notInt);
 517         __ cmpw(flags, ftos);
 518         __ br(Assembler::NE, notFloat);
 519         // ftos
 520         __ load_float(field);
 521         __ push(ftos);
 522         __ b(Done);
 523 
 524         __ bind(notFloat);
 525         __ cmpw(flags, stos);
 526         __ br(Assembler::NE, notShort);
 527         // stos
 528         __ load_signed_short(r0, field);
 529         __ push(stos);
 530         __ b(Done);
 531 
 532         __ bind(notShort);
 533         __ cmpw(flags, btos);
 534         __ br(Assembler::NE, notByte);
 535         // btos
 536         __ load_signed_byte(r0, field);
 537         __ push(btos);
 538         __ b(Done);
 539 
 540         __ bind(notByte);
 541         __ cmpw(flags, ctos);
 542         __ br(Assembler::NE, notChar);
 543         // ctos
 544         __ load_unsigned_short(r0, field);
 545         __ push(ctos);
 546         __ b(Done);
 547 
 548         __ bind(notChar);
 549         __ cmpw(flags, ztos);
 550         __ br(Assembler::NE, notBool);
 551         // ztos
 552         __ load_signed_byte(r0, field);
 553         __ push(ztos);
 554         __ b(Done);
 555 
 556         __ bind(notBool);
 557         break;
 558       }
 559 
 560     case Bytecodes::_ldc2_w:
 561       {
 562         Label notLong, notDouble;
 563         __ cmpw(flags, ltos);
 564         __ br(Assembler::NE, notLong);
 565         // ltos
 566         __ ldr(r0, field);
 567         __ push(ltos);
 568         __ b(Done);
 569 
 570         __ bind(notLong);
 571         __ cmpw(flags, dtos);
 572         __ br(Assembler::NE, notDouble);
 573         // dtos
 574         __ load_double(field);
 575         __ push(dtos);
 576         __ b(Done);
 577 
  578         __ bind(notDouble);
 579         break;
 580       }
 581 
 582     default:
 583       ShouldNotReachHere();
 584     }
 585 
 586     __ stop("bad ldc/condy");
 587 }
 588 
 589 void TemplateTable::locals_index(Register reg, int offset)
 590 {
 591   __ ldrb(reg, at_bcp(offset));
 592   __ neg(reg, reg);
 593 }
 594 
 595 void TemplateTable::iload() {
 596   iload_internal();
 597 }
 598 
 599 void TemplateTable::nofast_iload() {
 600   iload_internal(may_not_rewrite);
 601 }
 602 
 603 void TemplateTable::iload_internal(RewriteControl rc) {
 604   transition(vtos, itos);
 605   if (RewriteFrequentPairs && rc == may_rewrite) {
 606     Label rewrite, done;
 607     Register bc = r4;
 608 
 609     // get next bytecode
 610     __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 611 
 612     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
 613     // last two iloads in a pair.  Comparing against fast_iload means that
  614     // the next bytecode is neither an iload nor a caload, and therefore
 615     // an iload pair.
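          // Illustrative sequence (not generated code): for "iload x; iload y;
          // iadd" the first execution leaves the first iload alone (the next
          // bytecode is still a plain _iload) and rewrites the second one to
          // _fast_iload; a later execution of the first iload then sees
          // _fast_iload and rewrites itself to _fast_iload2.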
 616     __ cmpw(r1, Bytecodes::_iload);
 617     __ br(Assembler::EQ, done);
 618 
 619     // if _fast_iload rewrite to _fast_iload2
 620     __ cmpw(r1, Bytecodes::_fast_iload);
 621     __ movw(bc, Bytecodes::_fast_iload2);
 622     __ br(Assembler::EQ, rewrite);
 623 
 624     // if _caload rewrite to _fast_icaload
 625     __ cmpw(r1, Bytecodes::_caload);
 626     __ movw(bc, Bytecodes::_fast_icaload);
 627     __ br(Assembler::EQ, rewrite);
 628 
 629     // else rewrite to _fast_iload
 630     __ movw(bc, Bytecodes::_fast_iload);
 631 
 632     // rewrite
 633     // bc: new bytecode
 634     __ bind(rewrite);
 635     patch_bytecode(Bytecodes::_iload, bc, r1, false);
 636     __ bind(done);
 637 
 638   }
 639 
 640   // do iload, get the local value into tos
 641   locals_index(r1);
 642   __ ldr(r0, iaddress(r1));
 643 
 644 }
 645 
 646 void TemplateTable::fast_iload2()
 647 {
 648   transition(vtos, itos);
 649   locals_index(r1);
 650   __ ldr(r0, iaddress(r1));
 651   __ push(itos);
 652   locals_index(r1, 3);
 653   __ ldr(r0, iaddress(r1));
 654 }
 655 
 656 void TemplateTable::fast_iload()
 657 {
 658   transition(vtos, itos);
 659   locals_index(r1);
 660   __ ldr(r0, iaddress(r1));
 661 }
 662 
 663 void TemplateTable::lload()
 664 {
 665   transition(vtos, ltos);
 666   __ ldrb(r1, at_bcp(1));
 667   __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
 668   __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
 669 }
 670 
 671 void TemplateTable::fload()
 672 {
 673   transition(vtos, ftos);
 674   locals_index(r1);
 675   // n.b. we use ldrd here because this is a 64 bit slot
 676   // this is comparable to the iload case
 677   __ ldrd(v0, faddress(r1));
 678 }
 679 
 680 void TemplateTable::dload()
 681 {
 682   transition(vtos, dtos);
 683   __ ldrb(r1, at_bcp(1));
 684   __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
 685   __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
 686 }
 687 
 688 void TemplateTable::aload()
 689 {
 690   transition(vtos, atos);
 691   locals_index(r1);
 692   __ ldr(r0, iaddress(r1));
 693 }
 694 
 695 void TemplateTable::locals_index_wide(Register reg) {
 696   __ ldrh(reg, at_bcp(2));
 697   __ rev16w(reg, reg);
 698   __ neg(reg, reg);
 699 }
 700 
 701 void TemplateTable::wide_iload() {
 702   transition(vtos, itos);
 703   locals_index_wide(r1);
 704   __ ldr(r0, iaddress(r1));
 705 }
 706 
 707 void TemplateTable::wide_lload()
 708 {
 709   transition(vtos, ltos);
 710   __ ldrh(r1, at_bcp(2));
 711   __ rev16w(r1, r1);
 712   __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
 713   __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
 714 }
 715 
 716 void TemplateTable::wide_fload()
 717 {
 718   transition(vtos, ftos);
 719   locals_index_wide(r1);
 720   // n.b. we use ldrd here because this is a 64 bit slot
 721   // this is comparable to the iload case
 722   __ ldrd(v0, faddress(r1));
 723 }
 724 
 725 void TemplateTable::wide_dload()
 726 {
 727   transition(vtos, dtos);
 728   __ ldrh(r1, at_bcp(2));
 729   __ rev16w(r1, r1);
 730   __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
 731   __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
 732 }
 733 
 734 void TemplateTable::wide_aload()
 735 {
 736   transition(vtos, atos);
 737   locals_index_wide(r1);
 738   __ ldr(r0, aaddress(r1));
 739 }
 740 
 741 void TemplateTable::index_check(Register array, Register index)
 742 {
 743   // destroys r1, rscratch1
 744   // sign extend index for use by indexed load
 745   // __ movl2ptr(index, index);
 746   // check index
 747   Register length = rscratch1;
 748   __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
 749   __ cmpw(index, length);
 750   if (index != r1) {
 751     // ??? convention: move aberrant index into r1 for exception message
 752     assert(r1 != array, "different registers");
 753     __ mov(r1, index);
 754   }
 755   Label ok;
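        // n.b. the unsigned LO test below also catches negative indices: seen
        // as an unsigned 32-bit value, a negative index is larger than any
        // legal array length.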
 756   __ br(Assembler::LO, ok);
 757   // ??? convention: move array into r3 for exception message
  758   __ mov(r3, array);
  759   __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  760   __ br(rscratch1);
 761   __ bind(ok);
 762 }
 763 
 764 void TemplateTable::iaload()
 765 {
 766   transition(itos, itos);
 767   __ mov(r1, r0);
 768   __ pop_ptr(r0);
 769   // r0: array
 770   // r1: index
 771   index_check(r0, r1); // leaves index in r1, kills rscratch1
 772   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
 773   __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
 774 }
 775 
 776 void TemplateTable::laload()
 777 {
 778   transition(itos, ltos);
 779   __ mov(r1, r0);
 780   __ pop_ptr(r0);
 781   // r0: array
 782   // r1: index
 783   index_check(r0, r1); // leaves index in r1, kills rscratch1
 784   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
 785   __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
 786 }
 787 
 788 void TemplateTable::faload()
 789 {
 790   transition(itos, ftos);
 791   __ mov(r1, r0);
 792   __ pop_ptr(r0);
 793   // r0: array
 794   // r1: index
 795   index_check(r0, r1); // leaves index in r1, kills rscratch1
 796   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
 797   __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
 798 }
 799 
 800 void TemplateTable::daload()
 801 {
 802   transition(itos, dtos);
 803   __ mov(r1, r0);
 804   __ pop_ptr(r0);
 805   // r0: array
 806   // r1: index
 807   index_check(r0, r1); // leaves index in r1, kills rscratch1
 808   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
 809   __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
 810 }
 811 
 812 void TemplateTable::aaload()
 813 {
 814   transition(itos, atos);
 815   __ mov(r1, r0);
 816   __ pop_ptr(r0);
 817   // r0: array
 818   // r1: index
 819   index_check(r0, r1); // leaves index in r1, kills rscratch1
 820   __ profile_array_type<ArrayLoadData>(r2, r0, r4);
 821   if (UseArrayFlattening) {
 822     Label is_flat_array, done;
 823 
 824     __ test_flat_array_oop(r0, r8 /*temp*/, is_flat_array);
 825     __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 826     do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
 827 
 828     __ b(done);
 829     __ bind(is_flat_array);
 830     __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_load), r0, r1);
 831     // Ensure the stores to copy the inline field contents are visible
 832     // before any subsequent store that publishes this reference.
 833     __ membar(Assembler::StoreStore);
 834     __ bind(done);
 835   } else {
 836     __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 837     do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
 838   }
 839   __ profile_element_type(r2, r0, r4);
 840 }
 841 
 842 void TemplateTable::baload()
 843 {
 844   transition(itos, itos);
 845   __ mov(r1, r0);
 846   __ pop_ptr(r0);
 847   // r0: array
 848   // r1: index
 849   index_check(r0, r1); // leaves index in r1, kills rscratch1
 850   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
 851   __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
 852 }
 853 
 854 void TemplateTable::caload()
 855 {
 856   transition(itos, itos);
 857   __ mov(r1, r0);
 858   __ pop_ptr(r0);
 859   // r0: array
 860   // r1: index
 861   index_check(r0, r1); // leaves index in r1, kills rscratch1
 862   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
 863   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
 864 }
 865 
 866 // iload followed by caload frequent pair
 867 void TemplateTable::fast_icaload()
 868 {
 869   transition(vtos, itos);
 870   // load index out of locals
 871   locals_index(r2);
 872   __ ldr(r1, iaddress(r2));
 873 
 874   __ pop_ptr(r0);
 875 
 876   // r0: array
 877   // r1: index
 878   index_check(r0, r1); // leaves index in r1, kills rscratch1
 879   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
 880   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
 881 }
 882 
 883 void TemplateTable::saload()
 884 {
 885   transition(itos, itos);
 886   __ mov(r1, r0);
 887   __ pop_ptr(r0);
 888   // r0: array
 889   // r1: index
 890   index_check(r0, r1); // leaves index in r1, kills rscratch1
 891   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
 892   __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
 893 }
 894 
 895 void TemplateTable::iload(int n)
 896 {
 897   transition(vtos, itos);
 898   __ ldr(r0, iaddress(n));
 899 }
 900 
 901 void TemplateTable::lload(int n)
 902 {
 903   transition(vtos, ltos);
 904   __ ldr(r0, laddress(n));
 905 }
 906 
 907 void TemplateTable::fload(int n)
 908 {
 909   transition(vtos, ftos);
 910   __ ldrs(v0, faddress(n));
 911 }
 912 
 913 void TemplateTable::dload(int n)
 914 {
 915   transition(vtos, dtos);
 916   __ ldrd(v0, daddress(n));
 917 }
 918 
 919 void TemplateTable::aload(int n)
 920 {
 921   transition(vtos, atos);
 922   __ ldr(r0, iaddress(n));
 923 }
 924 
 925 void TemplateTable::aload_0() {
 926   aload_0_internal();
 927 }
 928 
 929 void TemplateTable::nofast_aload_0() {
 930   aload_0_internal(may_not_rewrite);
 931 }
 932 
 933 void TemplateTable::aload_0_internal(RewriteControl rc) {
 934   // According to bytecode histograms, the pairs:
 935   //
 936   // _aload_0, _fast_igetfield
 937   // _aload_0, _fast_agetfield
 938   // _aload_0, _fast_fgetfield
 939   //
 940   // occur frequently. If RewriteFrequentPairs is set, the (slow)
 941   // _aload_0 bytecode checks if the next bytecode is either
 942   // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
 943   // rewrites the current bytecode into a pair bytecode; otherwise it
 944   // rewrites the current bytecode into _fast_aload_0 that doesn't do
 945   // the pair check anymore.
 946   //
 947   // Note: If the next bytecode is _getfield, the rewrite must be
 948   //       delayed, otherwise we may miss an opportunity for a pair.
 949   //
 950   // Also rewrite frequent pairs
 951   //   aload_0, aload_1
 952   //   aload_0, iload_1
 953   // These bytecodes with a small amount of code are most profitable
 954   // to rewrite
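        // In table form (illustrative only):
        //   aload_0, _fast_igetfield  ->  _fast_iaccess_0
        //   aload_0, _fast_agetfield  ->  _fast_aaccess_0
        //   aload_0, _fast_fgetfield  ->  _fast_faccess_0
        //   aload_0, _getfield        ->  left alone for now (rewrite is delayed)
        //   aload_0, anything else    ->  _fast_aload_0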
 955   if (RewriteFrequentPairs && rc == may_rewrite) {
 956     Label rewrite, done;
 957     const Register bc = r4;
 958 
 959     // get next bytecode
 960     __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 961 
 962     // if _getfield then wait with rewrite
  963     __ cmpw(r1, Bytecodes::_getfield);
 964     __ br(Assembler::EQ, done);
 965 
 966     // if _igetfield then rewrite to _fast_iaccess_0
 967     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 968     __ cmpw(r1, Bytecodes::_fast_igetfield);
 969     __ movw(bc, Bytecodes::_fast_iaccess_0);
 970     __ br(Assembler::EQ, rewrite);
 971 
 972     // if _agetfield then rewrite to _fast_aaccess_0
 973     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 974     __ cmpw(r1, Bytecodes::_fast_agetfield);
 975     __ movw(bc, Bytecodes::_fast_aaccess_0);
 976     __ br(Assembler::EQ, rewrite);
 977 
 978     // if _fgetfield then rewrite to _fast_faccess_0
 979     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 980     __ cmpw(r1, Bytecodes::_fast_fgetfield);
 981     __ movw(bc, Bytecodes::_fast_faccess_0);
 982     __ br(Assembler::EQ, rewrite);
 983 
 984     // else rewrite to _fast_aload0
 985     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
  986     __ movw(bc, Bytecodes::_fast_aload_0);
 987 
 988     // rewrite
 989     // bc: new bytecode
 990     __ bind(rewrite);
 991     patch_bytecode(Bytecodes::_aload_0, bc, r1, false);
 992 
 993     __ bind(done);
 994   }
 995 
 996   // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
 997   aload(0);
 998 }
 999 
1000 void TemplateTable::istore()
1001 {
1002   transition(itos, vtos);
1003   locals_index(r1);
 1004   // FIXME: We're being very pernickety here storing a jint in a
1005   // local with strw, which costs an extra instruction over what we'd
1006   // be able to do with a simple str.  We should just store the whole
1007   // word.
1008   __ lea(rscratch1, iaddress(r1));
1009   __ strw(r0, Address(rscratch1));
1010 }
1011 
1012 void TemplateTable::lstore()
1013 {
1014   transition(ltos, vtos);
1015   locals_index(r1);
1016   __ str(r0, laddress(r1, rscratch1, _masm));
1017 }
1018 
1019 void TemplateTable::fstore() {
1020   transition(ftos, vtos);
1021   locals_index(r1);
1022   __ lea(rscratch1, iaddress(r1));
1023   __ strs(v0, Address(rscratch1));
1024 }
1025 
1026 void TemplateTable::dstore() {
1027   transition(dtos, vtos);
1028   locals_index(r1);
1029   __ strd(v0, daddress(r1, rscratch1, _masm));
1030 }
1031 
1032 void TemplateTable::astore()
1033 {
1034   transition(vtos, vtos);
1035   __ pop_ptr(r0);
1036   locals_index(r1);
1037   __ str(r0, aaddress(r1));
1038 }
1039 
1040 void TemplateTable::wide_istore() {
1041   transition(vtos, vtos);
1042   __ pop_i();
1043   locals_index_wide(r1);
1044   __ lea(rscratch1, iaddress(r1));
1045   __ strw(r0, Address(rscratch1));
1046 }
1047 
1048 void TemplateTable::wide_lstore() {
1049   transition(vtos, vtos);
1050   __ pop_l();
1051   locals_index_wide(r1);
1052   __ str(r0, laddress(r1, rscratch1, _masm));
1053 }
1054 
1055 void TemplateTable::wide_fstore() {
1056   transition(vtos, vtos);
1057   __ pop_f();
1058   locals_index_wide(r1);
1059   __ lea(rscratch1, faddress(r1));
1060   __ strs(v0, rscratch1);
1061 }
1062 
1063 void TemplateTable::wide_dstore() {
1064   transition(vtos, vtos);
1065   __ pop_d();
1066   locals_index_wide(r1);
1067   __ strd(v0, daddress(r1, rscratch1, _masm));
1068 }
1069 
1070 void TemplateTable::wide_astore() {
1071   transition(vtos, vtos);
1072   __ pop_ptr(r0);
1073   locals_index_wide(r1);
1074   __ str(r0, aaddress(r1));
1075 }
1076 
1077 void TemplateTable::iastore() {
1078   transition(itos, vtos);
1079   __ pop_i(r1);
1080   __ pop_ptr(r3);
1081   // r0: value
1082   // r1: index
1083   // r3: array
1084   index_check(r3, r1); // prefer index in r1
1085   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
1086   __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg, noreg);
1087 }
1088 
1089 void TemplateTable::lastore() {
1090   transition(ltos, vtos);
1091   __ pop_i(r1);
1092   __ pop_ptr(r3);
1093   // r0: value
1094   // r1: index
1095   // r3: array
1096   index_check(r3, r1); // prefer index in r1
1097   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
1098   __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg, noreg);
1099 }
1100 
1101 void TemplateTable::fastore() {
1102   transition(ftos, vtos);
1103   __ pop_i(r1);
1104   __ pop_ptr(r3);
1105   // v0: value
1106   // r1:  index
1107   // r3:  array
1108   index_check(r3, r1); // prefer index in r1
1109   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
1110   __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg, noreg);
1111 }
1112 
1113 void TemplateTable::dastore() {
1114   transition(dtos, vtos);
1115   __ pop_i(r1);
1116   __ pop_ptr(r3);
1117   // v0: value
1118   // r1:  index
1119   // r3:  array
1120   index_check(r3, r1); // prefer index in r1
1121   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
1122   __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg, noreg);
1123 }
1124 
1125 void TemplateTable::aastore() {
1126   Label is_null, is_flat_array, ok_is_subtype, done;
1127   transition(vtos, vtos);
1128   // stack: ..., array, index, value
1129   __ ldr(r0, at_tos());    // value
1130   __ ldr(r2, at_tos_p1()); // index
1131   __ ldr(r3, at_tos_p2()); // array
1132 
1133   index_check(r3, r2);     // kills r1
1134 
1135   __ profile_array_type<ArrayStoreData>(r4, r3, r5);
1136   __ profile_multiple_element_types(r4, r0, r5, r6);
1137 
1138   __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
1139   Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
1140   // Be careful not to clobber r4 below
1141 
1142   // do array store check - check for null value first
1143   __ cbz(r0, is_null);
1144 
1145   // Move array class to r5
1146   __ load_klass(r5, r3);
1147 
1148   if (UseArrayFlattening) {
1149     __ ldrw(r6, Address(r5, Klass::layout_helper_offset()));
1150     __ test_flat_array_layout(r6, is_flat_array);
1151   }
1152 
1153   // Move subklass into r1
1154   __ load_klass(r1, r0);
1155 
1156   // Move array element superklass into r0
1157   __ ldr(r0, Address(r5, ObjArrayKlass::element_klass_offset()));
1158   // Compress array + index*oopSize + 12 into a single register.  Frees r2.
1159 
1160   // Generate subtype check.  Blows r2, r5
1161   // Superklass in r0.  Subklass in r1.
1162 
1163   // is "r1 <: r0" ? (value subclass <: array element superclass)
1164   __ gen_subtype_check(r1, ok_is_subtype, false);
1165 
1166   // Come here on failure
1167   // object is at TOS
1168   __ b(Interpreter::_throw_ArrayStoreException_entry);
1169 
1170   // Come here on success
1171   __ bind(ok_is_subtype);
1172 
1173   // Get the value we will store
1174   __ ldr(r0, at_tos());
1175   // Now store using the appropriate barrier
1176   do_oop_store(_masm, element_address, r0, IS_ARRAY);
1177   __ b(done);
1178 
1179   // Have a null in r0, r3=array, r2=index.  Store null at ary[idx]
1180   __ bind(is_null);
1181   if (EnableValhalla) {
1182     Label is_null_into_value_array_npe, store_null;
1183 
1184     if (UseArrayFlattening) {
1185       __ test_flat_array_oop(r3, r8, is_flat_array);
1186     }
1187 
1188     // No way to store null in a null-free array
1189     __ test_null_free_array_oop(r3, r8, is_null_into_value_array_npe);
1190     __ b(store_null);
1191 
1192     __ bind(is_null_into_value_array_npe);
1193     __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
1194 
1195     __ bind(store_null);
1196   }
1197 
1198   // Store a null
1199   do_oop_store(_masm, element_address, noreg, IS_ARRAY);
1200   __ b(done);
1201 
1202   if (UseArrayFlattening) {
 1203     Label is_type_ok;
1204     __ bind(is_flat_array); // Store non-null value to flat
1205 
1206     __ ldr(r0, at_tos());    // value
1207     __ ldr(r3, at_tos_p1()); // index
1208     __ ldr(r2, at_tos_p2()); // array
1209     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::flat_array_store), r0, r2, r3);
1210   }
1211 
1212   // Pop stack arguments
1213   __ bind(done);
1214   __ add(esp, esp, 3 * Interpreter::stackElementSize);
1215 }
1216 
1217 void TemplateTable::bastore()
1218 {
1219   transition(itos, vtos);
1220   __ pop_i(r1);
1221   __ pop_ptr(r3);
1222   // r0: value
1223   // r1: index
1224   // r3: array
1225   index_check(r3, r1); // prefer index in r1
1226 
1227   // Need to check whether array is boolean or byte
1228   // since both types share the bastore bytecode.
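        // layout_helper_boolean_diffbit() picks out a layout-helper bit that is
        // set for a boolean[] but not for a byte[].  If it is set, the value is
        // masked to its low bit below, so e.g. storing 2 into a boolean[]
        // element leaves 0.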
1229   __ load_klass(r2, r3);
1230   __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
1231   int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
1232   Label L_skip;
1233   __ tbz(r2, diffbit_index, L_skip);
1234   __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
1235   __ bind(L_skip);
1236 
1237   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
1238   __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg, noreg);
1239 }
1240 
1241 void TemplateTable::castore()
1242 {
1243   transition(itos, vtos);
1244   __ pop_i(r1);
1245   __ pop_ptr(r3);
1246   // r0: value
1247   // r1: index
1248   // r3: array
1249   index_check(r3, r1); // prefer index in r1
1250   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
1251   __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg, noreg);
1252 }
1253 
1254 void TemplateTable::sastore()
1255 {
1256   castore();
1257 }
1258 
1259 void TemplateTable::istore(int n)
1260 {
1261   transition(itos, vtos);
1262   __ str(r0, iaddress(n));
1263 }
1264 
1265 void TemplateTable::lstore(int n)
1266 {
1267   transition(ltos, vtos);
1268   __ str(r0, laddress(n));
1269 }
1270 
1271 void TemplateTable::fstore(int n)
1272 {
1273   transition(ftos, vtos);
1274   __ strs(v0, faddress(n));
1275 }
1276 
1277 void TemplateTable::dstore(int n)
1278 {
1279   transition(dtos, vtos);
1280   __ strd(v0, daddress(n));
1281 }
1282 
1283 void TemplateTable::astore(int n)
1284 {
1285   transition(vtos, vtos);
1286   __ pop_ptr(r0);
1287   __ str(r0, iaddress(n));
1288 }
1289 
1290 void TemplateTable::pop()
1291 {
1292   transition(vtos, vtos);
1293   __ add(esp, esp, Interpreter::stackElementSize);
1294 }
1295 
1296 void TemplateTable::pop2()
1297 {
1298   transition(vtos, vtos);
1299   __ add(esp, esp, 2 * Interpreter::stackElementSize);
1300 }
1301 
1302 void TemplateTable::dup()
1303 {
1304   transition(vtos, vtos);
1305   __ ldr(r0, Address(esp, 0));
1306   __ push(r0);
1307   // stack: ..., a, a
1308 }
1309 
1310 void TemplateTable::dup_x1()
1311 {
1312   transition(vtos, vtos);
1313   // stack: ..., a, b
1314   __ ldr(r0, at_tos());  // load b
1315   __ ldr(r2, at_tos_p1());  // load a
1316   __ str(r0, at_tos_p1());  // store b
1317   __ str(r2, at_tos());  // store a
1318   __ push(r0);                  // push b
1319   // stack: ..., b, a, b
1320 }
1321 
1322 void TemplateTable::dup_x2()
1323 {
1324   transition(vtos, vtos);
1325   // stack: ..., a, b, c
1326   __ ldr(r0, at_tos());  // load c
1327   __ ldr(r2, at_tos_p2());  // load a
1328   __ str(r0, at_tos_p2());  // store c in a
1329   __ push(r0);      // push c
1330   // stack: ..., c, b, c, c
1331   __ ldr(r0, at_tos_p2());  // load b
1332   __ str(r2, at_tos_p2());  // store a in b
1333   // stack: ..., c, a, c, c
1334   __ str(r0, at_tos_p1());  // store b in c
1335   // stack: ..., c, a, b, c
1336 }
1337 
1338 void TemplateTable::dup2()
1339 {
1340   transition(vtos, vtos);
1341   // stack: ..., a, b
1342   __ ldr(r0, at_tos_p1());  // load a
1343   __ push(r0);                  // push a
1344   __ ldr(r0, at_tos_p1());  // load b
1345   __ push(r0);                  // push b
1346   // stack: ..., a, b, a, b
1347 }
1348 
1349 void TemplateTable::dup2_x1()
1350 {
1351   transition(vtos, vtos);
1352   // stack: ..., a, b, c
1353   __ ldr(r2, at_tos());  // load c
1354   __ ldr(r0, at_tos_p1());  // load b
1355   __ push(r0);                  // push b
1356   __ push(r2);                  // push c
1357   // stack: ..., a, b, c, b, c
1358   __ str(r2, at_tos_p3());  // store c in b
1359   // stack: ..., a, c, c, b, c
1360   __ ldr(r2, at_tos_p4());  // load a
1361   __ str(r2, at_tos_p2());  // store a in 2nd c
1362   // stack: ..., a, c, a, b, c
1363   __ str(r0, at_tos_p4());  // store b in a
1364   // stack: ..., b, c, a, b, c
1365 }
1366 
1367 void TemplateTable::dup2_x2()
1368 {
1369   transition(vtos, vtos);
1370   // stack: ..., a, b, c, d
1371   __ ldr(r2, at_tos());  // load d
1372   __ ldr(r0, at_tos_p1());  // load c
 1373   __ push(r0);                  // push c
1374   __ push(r2);                  // push d
1375   // stack: ..., a, b, c, d, c, d
1376   __ ldr(r0, at_tos_p4());  // load b
1377   __ str(r0, at_tos_p2());  // store b in d
1378   __ str(r2, at_tos_p4());  // store d in b
1379   // stack: ..., a, d, c, b, c, d
1380   __ ldr(r2, at_tos_p5());  // load a
1381   __ ldr(r0, at_tos_p3());  // load c
1382   __ str(r2, at_tos_p3());  // store a in c
1383   __ str(r0, at_tos_p5());  // store c in a
1384   // stack: ..., c, d, a, b, c, d
1385 }
1386 
1387 void TemplateTable::swap()
1388 {
1389   transition(vtos, vtos);
1390   // stack: ..., a, b
1391   __ ldr(r2, at_tos_p1());  // load a
1392   __ ldr(r0, at_tos());  // load b
1393   __ str(r2, at_tos());  // store a in b
1394   __ str(r0, at_tos_p1());  // store b in a
1395   // stack: ..., b, a
1396 }
1397 
1398 void TemplateTable::iop2(Operation op)
1399 {
1400   transition(itos, itos);
1401   // r0 <== r1 op r0
1402   __ pop_i(r1);
1403   switch (op) {
1404   case add  : __ addw(r0, r1, r0); break;
1405   case sub  : __ subw(r0, r1, r0); break;
1406   case mul  : __ mulw(r0, r1, r0); break;
1407   case _and : __ andw(r0, r1, r0); break;
1408   case _or  : __ orrw(r0, r1, r0); break;
1409   case _xor : __ eorw(r0, r1, r0); break;
1410   case shl  : __ lslvw(r0, r1, r0); break;
1411   case shr  : __ asrvw(r0, r1, r0); break;
 1412   case ushr : __ lsrvw(r0, r1, r0); break;
1413   default   : ShouldNotReachHere();
1414   }
1415 }
1416 
1417 void TemplateTable::lop2(Operation op)
1418 {
1419   transition(ltos, ltos);
1420   // r0 <== r1 op r0
1421   __ pop_l(r1);
1422   switch (op) {
1423   case add  : __ add(r0, r1, r0); break;
1424   case sub  : __ sub(r0, r1, r0); break;
1425   case mul  : __ mul(r0, r1, r0); break;
1426   case _and : __ andr(r0, r1, r0); break;
1427   case _or  : __ orr(r0, r1, r0); break;
1428   case _xor : __ eor(r0, r1, r0); break;
1429   default   : ShouldNotReachHere();
1430   }
1431 }
1432 
1433 void TemplateTable::idiv()
1434 {
1435   transition(itos, itos);
1436   // explicitly check for div0
1437   Label no_div0;
1438   __ cbnzw(r0, no_div0);
1439   __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1440   __ br(rscratch1);
1441   __ bind(no_div0);
1442   __ pop_i(r1);
1443   // r0 <== r1 idiv r0
1444   __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
1445 }
1446 
1447 void TemplateTable::irem()
1448 {
1449   transition(itos, itos);
1450   // explicitly check for div0
1451   Label no_div0;
1452   __ cbnzw(r0, no_div0);
1453   __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1454   __ br(rscratch1);
1455   __ bind(no_div0);
1456   __ pop_i(r1);
1457   // r0 <== r1 irem r0
1458   __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
1459 }
1460 
1461 void TemplateTable::lmul()
1462 {
1463   transition(ltos, ltos);
1464   __ pop_l(r1);
1465   __ mul(r0, r0, r1);
1466 }
1467 
1468 void TemplateTable::ldiv()
1469 {
1470   transition(ltos, ltos);
1471   // explicitly check for div0
1472   Label no_div0;
1473   __ cbnz(r0, no_div0);
1474   __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1475   __ br(rscratch1);
1476   __ bind(no_div0);
1477   __ pop_l(r1);
1478   // r0 <== r1 ldiv r0
1479   __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
1480 }
1481 
1482 void TemplateTable::lrem()
1483 {
1484   transition(ltos, ltos);
1485   // explicitly check for div0
1486   Label no_div0;
1487   __ cbnz(r0, no_div0);
1488   __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1489   __ br(rscratch1);
1490   __ bind(no_div0);
1491   __ pop_l(r1);
1492   // r0 <== r1 lrem r0
1493   __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
1494 }
1495 
1496 void TemplateTable::lshl()
1497 {
1498   transition(itos, ltos);
1499   // shift count is in r0
1500   __ pop_l(r1);
1501   __ lslv(r0, r1, r0);
1502 }
1503 
1504 void TemplateTable::lshr()
1505 {
1506   transition(itos, ltos);
1507   // shift count is in r0
1508   __ pop_l(r1);
1509   __ asrv(r0, r1, r0);
1510 }
1511 
1512 void TemplateTable::lushr()
1513 {
1514   transition(itos, ltos);
1515   // shift count is in r0
1516   __ pop_l(r1);
1517   __ lsrv(r0, r1, r0);
1518 }
1519 
1520 void TemplateTable::fop2(Operation op)
1521 {
1522   transition(ftos, ftos);
1523   switch (op) {
1524   case add:
1525     // n.b. use ldrd because this is a 64 bit slot
1526     __ pop_f(v1);
1527     __ fadds(v0, v1, v0);
1528     break;
1529   case sub:
1530     __ pop_f(v1);
1531     __ fsubs(v0, v1, v0);
1532     break;
1533   case mul:
1534     __ pop_f(v1);
1535     __ fmuls(v0, v1, v0);
1536     break;
1537   case div:
1538     __ pop_f(v1);
1539     __ fdivs(v0, v1, v0);
1540     break;
1541   case rem:
1542     __ fmovs(v1, v0);
1543     __ pop_f(v0);
1544     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1545     break;
1546   default:
1547     ShouldNotReachHere();
1548     break;
1549   }
1550 }
1551 
1552 void TemplateTable::dop2(Operation op)
1553 {
1554   transition(dtos, dtos);
1555   switch (op) {
1556   case add:
1557     // n.b. use ldrd because this is a 64 bit slot
1558     __ pop_d(v1);
1559     __ faddd(v0, v1, v0);
1560     break;
1561   case sub:
1562     __ pop_d(v1);
1563     __ fsubd(v0, v1, v0);
1564     break;
1565   case mul:
1566     __ pop_d(v1);
1567     __ fmuld(v0, v1, v0);
1568     break;
1569   case div:
1570     __ pop_d(v1);
1571     __ fdivd(v0, v1, v0);
1572     break;
1573   case rem:
1574     __ fmovd(v1, v0);
1575     __ pop_d(v0);
1576     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1577     break;
1578   default:
1579     ShouldNotReachHere();
1580     break;
1581   }
1582 }
1583 
1584 void TemplateTable::ineg()
1585 {
1586   transition(itos, itos);
1587   __ negw(r0, r0);
1588 
1589 }
1590 
1591 void TemplateTable::lneg()
1592 {
1593   transition(ltos, ltos);
1594   __ neg(r0, r0);
1595 }
1596 
1597 void TemplateTable::fneg()
1598 {
1599   transition(ftos, ftos);
1600   __ fnegs(v0, v0);
1601 }
1602 
1603 void TemplateTable::dneg()
1604 {
1605   transition(dtos, dtos);
1606   __ fnegd(v0, v0);
1607 }
1608 
1609 void TemplateTable::iinc()
1610 {
1611   transition(vtos, vtos);
1612   __ load_signed_byte(r1, at_bcp(2)); // get constant
1613   locals_index(r2);
1614   __ ldr(r0, iaddress(r2));
1615   __ addw(r0, r0, r1);
1616   __ str(r0, iaddress(r2));
1617 }
1618 
1619 void TemplateTable::wide_iinc()
1620 {
1621   transition(vtos, vtos);
1622   // __ mov(r1, zr);
1623   __ ldrw(r1, at_bcp(2)); // get constant and index
1624   __ rev16(r1, r1);
1625   __ ubfx(r2, r1, 0, 16);
1626   __ neg(r2, r2);
1627   __ sbfx(r1, r1, 16, 16);
1628   __ ldr(r0, iaddress(r2));
1629   __ addw(r0, r0, r1);
1630   __ str(r0, iaddress(r2));
1631 }
1632 
1633 void TemplateTable::convert()
1634 {
1635   // Checking
1636 #ifdef ASSERT
1637   {
1638     TosState tos_in  = ilgl;
1639     TosState tos_out = ilgl;
1640     switch (bytecode()) {
1641     case Bytecodes::_i2l: // fall through
1642     case Bytecodes::_i2f: // fall through
1643     case Bytecodes::_i2d: // fall through
1644     case Bytecodes::_i2b: // fall through
1645     case Bytecodes::_i2c: // fall through
1646     case Bytecodes::_i2s: tos_in = itos; break;
1647     case Bytecodes::_l2i: // fall through
1648     case Bytecodes::_l2f: // fall through
1649     case Bytecodes::_l2d: tos_in = ltos; break;
1650     case Bytecodes::_f2i: // fall through
1651     case Bytecodes::_f2l: // fall through
1652     case Bytecodes::_f2d: tos_in = ftos; break;
1653     case Bytecodes::_d2i: // fall through
1654     case Bytecodes::_d2l: // fall through
1655     case Bytecodes::_d2f: tos_in = dtos; break;
1656     default             : ShouldNotReachHere();
1657     }
1658     switch (bytecode()) {
1659     case Bytecodes::_l2i: // fall through
1660     case Bytecodes::_f2i: // fall through
1661     case Bytecodes::_d2i: // fall through
1662     case Bytecodes::_i2b: // fall through
1663     case Bytecodes::_i2c: // fall through
1664     case Bytecodes::_i2s: tos_out = itos; break;
1665     case Bytecodes::_i2l: // fall through
1666     case Bytecodes::_f2l: // fall through
1667     case Bytecodes::_d2l: tos_out = ltos; break;
1668     case Bytecodes::_i2f: // fall through
1669     case Bytecodes::_l2f: // fall through
1670     case Bytecodes::_d2f: tos_out = ftos; break;
1671     case Bytecodes::_i2d: // fall through
1672     case Bytecodes::_l2d: // fall through
1673     case Bytecodes::_f2d: tos_out = dtos; break;
1674     default             : ShouldNotReachHere();
1675     }
1676     transition(tos_in, tos_out);
1677   }
1678 #endif // ASSERT
1679   // static const int64_t is_nan = 0x8000000000000000L;
1680 
1681   // Conversion
1682   switch (bytecode()) {
1683   case Bytecodes::_i2l:
1684     __ sxtw(r0, r0);
1685     break;
1686   case Bytecodes::_i2f:
1687     __ scvtfws(v0, r0);
1688     break;
1689   case Bytecodes::_i2d:
1690     __ scvtfwd(v0, r0);
1691     break;
1692   case Bytecodes::_i2b:
1693     __ sxtbw(r0, r0);
1694     break;
1695   case Bytecodes::_i2c:
1696     __ uxthw(r0, r0);
1697     break;
1698   case Bytecodes::_i2s:
1699     __ sxthw(r0, r0);
1700     break;
1701   case Bytecodes::_l2i:
1702     __ uxtw(r0, r0);
1703     break;
1704   case Bytecodes::_l2f:
1705     __ scvtfs(v0, r0);
1706     break;
1707   case Bytecodes::_l2d:
1708     __ scvtfd(v0, r0);
1709     break;
1710   case Bytecodes::_f2i:
1711   {
1712     Label L_Okay;
1713     __ clear_fpsr();
1714     __ fcvtzsw(r0, v0);
1715     __ get_fpsr(r1);
1716     __ cbzw(r1, L_Okay);
1717     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
1718     __ bind(L_Okay);
1719   }
1720     break;
1721   case Bytecodes::_f2l:
1722   {
1723     Label L_Okay;
1724     __ clear_fpsr();
1725     __ fcvtzs(r0, v0);
1726     __ get_fpsr(r1);
1727     __ cbzw(r1, L_Okay);
1728     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
1729     __ bind(L_Okay);
1730   }
1731     break;
1732   case Bytecodes::_f2d:
1733     __ fcvts(v0, v0);
1734     break;
1735   case Bytecodes::_d2i:
1736   {
1737     Label L_Okay;
1738     __ clear_fpsr();
1739     __ fcvtzdw(r0, v0);
1740     __ get_fpsr(r1);
1741     __ cbzw(r1, L_Okay);
1742     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
1743     __ bind(L_Okay);
1744   }
1745     break;
1746   case Bytecodes::_d2l:
1747   {
1748     Label L_Okay;
1749     __ clear_fpsr();
1750     __ fcvtzd(r0, v0);
1751     __ get_fpsr(r1);
1752     __ cbzw(r1, L_Okay);
1753     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1754     __ bind(L_Okay);
1755   }
1756     break;
1757   case Bytecodes::_d2f:
1758     __ fcvtd(v0, v0);
1759     break;
1760   default:
1761     ShouldNotReachHere();
1762   }
1763 }
1764 
1765 void TemplateTable::lcmp()
1766 {
1767   transition(ltos, itos);
1768   Label done;
1769   __ pop_l(r1);
1770   __ cmp(r1, r0);
1771   __ mov(r0, (uint64_t)-1L);
1772   __ br(Assembler::LT, done);
1773   // __ mov(r0, 1UL);
1774   // __ csel(r0, r0, zr, Assembler::NE);
1775   // and here is a faster way
1776   __ csinc(r0, zr, zr, Assembler::EQ);
1777   __ bind(done);
1778 }
1779 
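      // float_cmp implements {f,d}cmpl and {f,d}cmpg: unordered_result is -1
      // for the *l flavours and +1 for the *g flavours.  E.g. comparing
      // (NaN, 1.0f) pushes -1 under fcmpl but +1 under fcmpg; ordered operands
      // give the usual -1/0/1.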
1780 void TemplateTable::float_cmp(bool is_float, int unordered_result)
1781 {
1782   Label done;
1783   if (is_float) {
1784     // XXX get rid of pop here, use ... reg, mem32
1785     __ pop_f(v1);
1786     __ fcmps(v1, v0);
1787   } else {
1788     // XXX get rid of pop here, use ... reg, mem64
1789     __ pop_d(v1);
1790     __ fcmpd(v1, v0);
1791   }
1792   if (unordered_result < 0) {
1793     // we want -1 for unordered or less than, 0 for equal and 1 for
1794     // greater than.
1795     __ mov(r0, (uint64_t)-1L);
1796     // for FP LT tests less than or unordered
1797     __ br(Assembler::LT, done);
1798     // install 0 for EQ otherwise 1
1799     __ csinc(r0, zr, zr, Assembler::EQ);
1800   } else {
1801     // we want -1 for less than, 0 for equal and 1 for unordered or
1802     // greater than.
1803     __ mov(r0, 1L);
1804     // for FP HI tests greater than or unordered
1805     __ br(Assembler::HI, done);
1806     // install 0 for EQ otherwise ~0
1807     __ csinv(r0, zr, zr, Assembler::EQ);
1808 
1809   }
1810   __ bind(done);
1811 }
1812 
1813 void TemplateTable::branch(bool is_jsr, bool is_wide)
1814 {
1815   __ profile_taken_branch(r0, r1);
1816   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
1817                              InvocationCounter::counter_offset();
1818   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
1819                               InvocationCounter::counter_offset();
1820 
1821   // load branch displacement
1822   if (!is_wide) {
1823     __ ldrh(r2, at_bcp(1));
1824     __ rev16(r2, r2);
1825     // sign extend the 16 bit value in r2
1826     __ sbfm(r2, r2, 0, 15);
1827   } else {
1828     __ ldrw(r2, at_bcp(1));
1829     __ revw(r2, r2);
1830     // sign extend the 32 bit value in r2
1831     __ sbfm(r2, r2, 0, 31);
1832   }
1833 
1834   // Handle all the JSR stuff here, then exit.
1835   // It's much shorter and cleaner than intermingling with the non-JSR
1836   // normal-branch stuff occurring below.
1837 
1838   if (is_jsr) {
1839     // Pre-load the next target bytecode into rscratch1
1840     __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
1841     // compute return address as bci
1842     __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
1843     __ add(rscratch2, rscratch2,
1844            in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
1845     __ sub(r1, rbcp, rscratch2);
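         // r1 is now the bci of the bytecode following the jsr (the code base
         // above was biased by the jsr/jsr_w length); it is pushed as the
         // return bci.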
1846     __ push_i(r1);
1847     // Adjust the bcp by the displacement in r2
1848     __ add(rbcp, rbcp, r2);
1849     __ dispatch_only(vtos, /*generate_poll*/true);
1850     return;
1851   }
1852 
1853   // Normal (non-jsr) branch handling
1854 
1855   // Adjust the bcp by the displacement in r2
1856   __ add(rbcp, rbcp, r2);
1857 
1858   assert(UseLoopCounter || !UseOnStackReplacement,
1859          "on-stack-replacement requires loop counters");
1860   Label backedge_counter_overflow;
1861   Label dispatch;
1862   if (UseLoopCounter) {
1863     // increment backedge counter for backward branches
1864     // r0: MDO
1865     // w1: MDO bumped taken-count
1866     // r2: target offset
1867     __ cmp(r2, zr);
1868     __ br(Assembler::GT, dispatch); // count only if backward branch
1869 
1870     // ECN: FIXME: This code smells
1871     // check if MethodCounters exists
1872     Label has_counters;
1873     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1874     __ cbnz(rscratch1, has_counters);
1875     __ push(r0);
1876     __ push(r1);
1877     __ push(r2);
1878     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
1879             InterpreterRuntime::build_method_counters), rmethod);
1880     __ pop(r2);
1881     __ pop(r1);
1882     __ pop(r0);
1883     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1884     __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
1885     __ bind(has_counters);
1886 
1887     Label no_mdo;
1888     int increment = InvocationCounter::count_increment;
1889     if (ProfileInterpreter) {
1890       // Are we profiling?
1891       __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1892       __ cbz(r1, no_mdo);
1893       // Increment the MDO backedge counter
1894       const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
1895                                          in_bytes(InvocationCounter::counter_offset()));
1896       const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
1897       __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1898                                  r0, rscratch1, false, Assembler::EQ,
1899                                  UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1900       __ b(dispatch);
1901     }
1902     __ bind(no_mdo);
1903     // Increment backedge counter in MethodCounters*
1904     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1905     const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
1906     __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
1907                                r0, rscratch2, false, Assembler::EQ,
1908                                UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1909     __ bind(dispatch);
1910   }
1911 
1912   // Pre-load the next target bytecode into rscratch1
1913   __ load_unsigned_byte(rscratch1, Address(rbcp, 0));
1914 
1915   // continue with the bytecode @ target
1916   // rscratch1: target bytecode
1917   // rbcp: target bcp
1918   __ dispatch_only(vtos, /*generate_poll*/true);
1919 
1920   if (UseLoopCounter && UseOnStackReplacement) {
1921     // backedge counter overflow
1922     __ bind(backedge_counter_overflow);
1923     __ neg(r2, r2);
1924     __ add(r2, r2, rbcp);     // branch bcp
1925     // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1926     __ call_VM(noreg,
1927                CAST_FROM_FN_PTR(address,
1928                                 InterpreterRuntime::frequency_counter_overflow),
1929                r2);
1930     __ load_unsigned_byte(r1, Address(rbcp, 0));  // restore target bytecode
1931 
1932     // r0: osr nmethod (osr ok) or null (osr not possible)
1933     // w1: target bytecode
1934     // r2: scratch
1935     __ cbz(r0, dispatch);     // test result -- no osr if null
1936     // nmethod may have been invalidated (VM may block upon call_VM return)
1937     __ ldrb(r2, Address(r0, nmethod::state_offset()));
1938     if (nmethod::in_use != 0)
1939       __ sub(r2, r2, nmethod::in_use);
1940     __ cbnz(r2, dispatch);
1941 
1942     // We have the address of an on stack replacement routine in r0
1943     // We need to prepare to execute the OSR method. First we must
1944     // migrate the locals and monitors off of the stack.
1945 
1946     __ mov(r19, r0);                             // save the nmethod
1947 
1948     call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1949 
1950     // r0 is OSR buffer, move it to expected parameter location
1951     __ mov(j_rarg0, r0);
1952 
1953     // remove activation
1954     // get sender esp
1955     __ ldr(esp,
1956         Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
1957     // remove frame anchor
1958     __ leave();
1959     // Ensure compiled code always sees stack at proper alignment
1960     __ andr(sp, esp, -16);
1961 
1962     // and begin the OSR nmethod
1963     __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
1964     __ br(rscratch1);
1965   }
1966 }
1967 
1968 
1969 void TemplateTable::if_0cmp(Condition cc)
1970 {
1971   transition(itos, vtos);
1972   // assume branch is more often taken than not (loops use backward branches)
1973   Label not_taken;
1974   if (cc == equal)
1975     __ cbnzw(r0, not_taken);
1976   else if (cc == not_equal)
1977     __ cbzw(r0, not_taken);
1978   else {
1979     __ andsw(zr, r0, r0);
1980     __ br(j_not(cc), not_taken);
1981   }
1982 
1983   branch(false, false);
1984   __ bind(not_taken);
1985   __ profile_not_taken_branch(r0);
1986 }
1987 
1988 void TemplateTable::if_icmp(Condition cc)
1989 {
1990   transition(itos, vtos);
1991   // assume branch is more often taken than not (loops use backward branches)
1992   Label not_taken;
1993   __ pop_i(r1);
1994   __ cmpw(r1, r0, Assembler::LSL);
1995   __ br(j_not(cc), not_taken);
1996   branch(false, false);
1997   __ bind(not_taken);
1998   __ profile_not_taken_branch(r0);
1999 }
2000 
2001 void TemplateTable::if_nullcmp(Condition cc)
2002 {
2003   transition(atos, vtos);
2004   // assume branch is more often taken than not (loops use backward branches)
2005   Label not_taken;
2006   if (cc == equal)
2007     __ cbnz(r0, not_taken);
2008   else
2009     __ cbz(r0, not_taken);
2010   branch(false, false);
2011   __ bind(not_taken);
2012   __ profile_not_taken_branch(r0);
2013 }
2014 
2015 void TemplateTable::if_acmp(Condition cc) {
2016   transition(atos, vtos);
2017   // assume branch is more often taken than not (loops use backward branches)
2018   Label taken, not_taken;
2019   __ pop_ptr(r1);
2020 
2021   __ profile_acmp(r2, r1, r0, r4);
2022 
2023   Register is_inline_type_mask = rscratch1;
2024   __ mov(is_inline_type_mask, markWord::inline_type_pattern);
2025 
2026   if (EnableValhalla) {
2027     __ cmp(r1, r0);
2028     __ br(Assembler::EQ, (cc == equal) ? taken : not_taken);
2029 
2030     // might be substitutable, test if either r0 or r1 is null
2031     __ andr(r2, r0, r1);
2032     __ cbz(r2, (cc == equal) ? not_taken : taken);
2033 
2034     // and are both inline types?
2035     __ ldr(r2, Address(r1, oopDesc::mark_offset_in_bytes()));
2036     __ andr(r2, r2, is_inline_type_mask);
2037     __ ldr(r4, Address(r0, oopDesc::mark_offset_in_bytes()));
2038     __ andr(r4, r4, is_inline_type_mask);
2039     __ andr(r2, r2, r4);
2040     __ cmp(r2,  is_inline_type_mask);
2041     __ br(Assembler::NE, (cc == equal) ? not_taken : taken);
2042 
2043     // same inline klass?
2044     __ load_metadata(r2, r1);
2045     __ load_metadata(r4, r0);
2046     __ cmp(r2, r4);
2047     __ br(Assembler::NE, (cc == equal) ? not_taken : taken);
2048 
2049     // Know both are the same type, let's test for substitutability...
2050     if (cc == equal) {
2051       invoke_is_substitutable(r0, r1, taken, not_taken);
2052     } else {
2053       invoke_is_substitutable(r0, r1, not_taken, taken);
2054     }
2055     __ stop("Not reachable");
2056   }
2057 
2058   __ cmpoop(r1, r0);
2059   __ br(j_not(cc), not_taken);
2060   __ bind(taken);
2061   branch(false, false);
2062   __ bind(not_taken);
2063   __ profile_not_taken_branch(r0, true);
2064 }
2065 
2066 void TemplateTable::invoke_is_substitutable(Register aobj, Register bobj,
2067                                             Label& is_subst, Label& not_subst) {
2068 
2069   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::is_substitutable), aobj, bobj);
2070   // r0 holds the result; jump to the corresponding outcome
2071   __ cbz(r0, not_subst);
2072   __ b(is_subst);
2073 }
2074 
2075 
2076 void TemplateTable::ret() {
2077   transition(vtos, vtos);
2078   locals_index(r1);
2079   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
2080   __ profile_ret(r1, r2);
2081   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
2082   __ lea(rbcp, Address(rbcp, r1));
2083   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
2084   __ dispatch_next(vtos, 0, /*generate_poll*/true);
2085 }
2086 
2087 void TemplateTable::wide_ret() {
2088   transition(vtos, vtos);
2089   locals_index_wide(r1);
2090   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
2091   __ profile_ret(r1, r2);
2092   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
2093   __ lea(rbcp, Address(rbcp, r1));
2094   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
2095   __ dispatch_next(vtos, 0, /*generate_poll*/true);
2096 }
2097 
2098 
2099 void TemplateTable::tableswitch() {
2100   Label default_case, continue_execution;
2101   transition(itos, vtos);
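       // After 4-byte alignment the tableswitch data addressed by r1 is laid
       // out as [0]: default offset  [1]: low  [2]: high  [3..]: jump offsets,
       // all 4-byte big-endian values, hence the rev32s below.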
2102   // align rbcp
2103   __ lea(r1, at_bcp(BytesPerInt));
2104   __ andr(r1, r1, -BytesPerInt);
2105   // load lo & hi
2106   __ ldrw(r2, Address(r1, BytesPerInt));
2107   __ ldrw(r3, Address(r1, 2 * BytesPerInt));
2108   __ rev32(r2, r2);
2109   __ rev32(r3, r3);
2110   // check against lo & hi
2111   __ cmpw(r0, r2);
2112   __ br(Assembler::LT, default_case);
2113   __ cmpw(r0, r3);
2114   __ br(Assembler::GT, default_case);
2115   // lookup dispatch offset
2116   __ subw(r0, r0, r2);
2117   __ lea(r3, Address(r1, r0, Address::uxtw(2)));
2118   __ ldrw(r3, Address(r3, 3 * BytesPerInt));
2119   __ profile_switch_case(r0, r1, r2);
2120   // continue execution
2121   __ bind(continue_execution);
2122   __ rev32(r3, r3);
2123   __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
2124   __ add(rbcp, rbcp, r3, ext::sxtw);
2125   __ dispatch_only(vtos, /*generate_poll*/true);
2126   // handle default
2127   __ bind(default_case);
2128   __ profile_switch_default(r0);
2129   __ ldrw(r3, Address(r1, 0));
2130   __ b(continue_execution);
2131 }
2132 
2133 void TemplateTable::lookupswitch() {
2134   transition(itos, itos);
2135   __ stop("lookupswitch bytecode should have been rewritten");
2136 }
2137 
2138 void TemplateTable::fast_linearswitch() {
2139   transition(itos, vtos);
2140   Label loop_entry, loop, found, continue_execution;
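       // After 4-byte alignment the lookupswitch data addressed by r19 is laid
       // out as [0]: default offset  [1]: npairs  [2..]: npairs (match, offset)
       // pairs; each pair is 8 bytes and all values are big-endian, hence the
       // rev32s below.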
2141   // bswap r0 so we can avoid bswapping the table entries
2142   __ rev32(r0, r0);
2143   // align rbcp
2144   __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2145                                     // this instruction (change offsets
2146                                     // below)
2147   __ andr(r19, r19, -BytesPerInt);
2148   // set counter
2149   __ ldrw(r1, Address(r19, BytesPerInt));
2150   __ rev32(r1, r1);
2151   __ b(loop_entry);
2152   // table search
2153   __ bind(loop);
2154   __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2155   __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
2156   __ cmpw(r0, rscratch1);
2157   __ br(Assembler::EQ, found);
2158   __ bind(loop_entry);
2159   __ subs(r1, r1, 1);
2160   __ br(Assembler::PL, loop);
2161   // default case
2162   __ profile_switch_default(r0);
2163   __ ldrw(r3, Address(r19, 0));
2164   __ b(continue_execution);
2165   // entry found -> get offset
2166   __ bind(found);
2167   __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2168   __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
2169   __ profile_switch_case(r1, r0, r19);
2170   // continue execution
2171   __ bind(continue_execution);
2172   __ rev32(r3, r3);
2173   __ add(rbcp, rbcp, r3, ext::sxtw);
2174   __ ldrb(rscratch1, Address(rbcp, 0));
2175   __ dispatch_only(vtos, /*generate_poll*/true);
2176 }
2177 
2178 void TemplateTable::fast_binaryswitch() {
2179   transition(itos, vtos);
2180   // Implementation using the following core algorithm:
2181   //
2182   // int binary_search(int key, LookupswitchPair* array, int n) {
2183   //   // Binary search according to "Methodik des Programmierens" by
2184   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2185   //   int i = 0;
2186   //   int j = n;
2187   //   while (i+1 < j) {
2188   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2189   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2190   //     // where a stands for the array and assuming that the (nonexistent)
2191   //     // element a[n] is infinitely big.
2192   //     int h = (i + j) >> 1;
2193   //     // i < h < j
2194   //     if (key < array[h].fast_match()) {
2195   //       j = h;
2196   //     } else {
2197   //       i = h;
2198   //     }
2199   //   }
2200   //   // R: a[i] <= key < a[i+1] or Q
2201   //   // (i.e., if key is within array, i is the correct index)
2202   //   return i;
2203   // }
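       //
       // Each LookupswitchPair is 8 bytes: a 4-byte match value followed by a
       // 4-byte branch offset, both big-endian (hence the lsl(3) addressing
       // and the rev32s below).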
2204 
2205   // Register allocation
2206   const Register key   = r0; // already set (tosca)
2207   const Register array = r1;
2208   const Register i     = r2;
2209   const Register j     = r3;
2210   const Register h     = rscratch1;
2211   const Register temp  = rscratch2;
2212 
2213   // Find array start
2214   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2215                                           // get rid of this
2216                                           // instruction (change
2217                                           // offsets below)
2218   __ andr(array, array, -BytesPerInt);
2219 
2220   // Initialize i & j
2221   __ mov(i, 0);                            // i = 0;
2222   __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);
2223 
2224   // Convert j into native byteordering
2225   __ rev32(j, j);
2226 
2227   // And start
2228   Label entry;
2229   __ b(entry);
2230 
2231   // binary search loop
2232   {
2233     Label loop;
2234     __ bind(loop);
2235     // int h = (i + j) >> 1;
2236     __ addw(h, i, j);                           // h = i + j;
2237     __ lsrw(h, h, 1);                                   // h = (i + j) >> 1;
2238     // if (key < array[h].fast_match()) {
2239     //   j = h;
2240     // } else {
2241     //   i = h;
2242     // }
2243     // Convert array[h].match to native byte-ordering before compare
2244     __ ldr(temp, Address(array, h, Address::lsl(3)));
2245     __ rev32(temp, temp);
2246     __ cmpw(key, temp);
2247     // j = h if (key <  array[h].fast_match())
2248     __ csel(j, h, j, Assembler::LT);
2249     // i = h if (key >= array[h].fast_match())
2250     __ csel(i, h, i, Assembler::GE);
2251     // while (i+1 < j)
2252     __ bind(entry);
2253     __ addw(h, i, 1);          // i+1
2254     __ cmpw(h, j);             // i+1 < j
2255     __ br(Assembler::LT, loop);
2256   }
2257 
2258   // end of binary search, result index is i (must check again!)
2259   Label default_case;
2260   // Convert array[i].match to native byte-ordering before compare
2261   __ ldr(temp, Address(array, i, Address::lsl(3)));
2262   __ rev32(temp, temp);
2263   __ cmpw(key, temp);
2264   __ br(Assembler::NE, default_case);
2265 
2266   // entry found -> j = offset
2267   __ add(j, array, i, ext::uxtx, 3);
2268   __ ldrw(j, Address(j, BytesPerInt));
2269   __ profile_switch_case(i, key, array);
2270   __ rev32(j, j);
2271   __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2272   __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2273   __ dispatch_only(vtos, /*generate_poll*/true);
2274 
2275   // default case -> j = default offset
2276   __ bind(default_case);
2277   __ profile_switch_default(i);
2278   __ ldrw(j, Address(array, -2 * BytesPerInt));
2279   __ rev32(j, j);
2280   __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2281   __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2282   __ dispatch_only(vtos, /*generate_poll*/true);
2283 }
2284 
2285 
2286 void TemplateTable::_return(TosState state)
2287 {
2288   transition(state, state);
2289   assert(_desc->calls_vm(),
2290          "inconsistent calls_vm information"); // call in remove_activation
2291 
2292   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2293     assert(state == vtos, "only valid state");
2294 
2295     __ ldr(c_rarg1, aaddress(0));
2296     __ load_klass(r3, c_rarg1);
2297     __ ldrb(r3, Address(r3, Klass::misc_flags_offset()));
2298     Label skip_register_finalizer;
2299     __ tbz(r3, exact_log2(KlassFlags::_misc_has_finalizer), skip_register_finalizer);
2300 
2301     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2302 
2303     __ bind(skip_register_finalizer);
2304   }
2305 
2306   // Issue a StoreStore barrier after all stores but before return
2307   // from any constructor for any class with a final field.  We don't
2308   // know whether this return is from such a constructor, so we always emit it.
2309   if (_desc->bytecode() == Bytecodes::_return)
2310     __ membar(MacroAssembler::StoreStore);
2311 
2312   if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
2313     Label no_safepoint;
2314     __ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
2315     __ tbz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), no_safepoint);
2316     __ push(state);
2317     __ push_cont_fastpath(rthread);
2318     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
2319     __ pop_cont_fastpath(rthread);
2320     __ pop(state);
2321     __ bind(no_safepoint);
2322   }
2323 
2324   // Narrow result if state is itos but result type is smaller.
2325   // Need to narrow in the return bytecode rather than in generate_return_entry
2326   // since compiled code callers expect the result to already be narrowed.
2327   if (state == itos) {
2328     __ narrow(r0);
2329   }
2330 
2331   __ remove_activation(state);
2332   __ ret(lr);
2333 }
2334 
2335 // ----------------------------------------------------------------------------
2336 // Volatile variables demand their effects be made known to all CPUs
2337 // in order.  Store buffers on most chips allow reads & writes to
2338 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2339 // without some kind of memory barrier (i.e., it's not sufficient that
2340 // the interpreter does not reorder volatile references, the hardware
2341 // also must not reorder them).
2342 //
2343 // According to the new Java Memory Model (JMM):
2344 // (1) All volatiles are serialized with respect to each other.  ALSO reads &
2345 //     writes act as acquire & release, so:
2346 // (2) A read cannot let unrelated NON-volatile memory refs that
2347 //     happen after the read float up to before the read.  It's OK for
2348 //     non-volatile memory refs that happen before the volatile read to
2349 //     float down below it.
2350 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2351 //     memory refs that happen BEFORE the write float down to after the
2352 //     write.  It's OK for non-volatile memory refs that happen after the
2353 //     volatile write to float up before it.
2354 //
2355 // We only put in barriers around volatile refs (they are expensive),
2356 // not _between_ memory refs (that would require us to track the
2357 // flavor of the previous memory refs).  Requirements (2) and (3)
2358 // require some barriers before volatile stores and after volatile
2359 // loads.  These nearly cover requirement (1) but miss the
2360 // volatile-store-volatile-load case.  This final case is placed after
2361 // volatile-stores although it could just as well go before
2362 // volatile-loads.
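     //
     // Concretely, the field accessors below bracket volatile accesses as
     //   volatile load  (getfield_or_static):  membar(AnyAny); load; membar(LoadLoad|LoadStore)
     //   volatile store (putfield_or_static):  membar(StoreStore|LoadStore); store; membar(StoreLoad|StoreStore)
     // (the leading AnyAny barrier is elided when only C1 or interpreter code
     // can run; see the 8179954 comment in getfield_or_static).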
2363 
2364 void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
2365                                             Register Rcache,
2366                                             Register index) {
2367   const Register temp = r19;
2368   assert_different_registers(Rcache, index, temp);
2369   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2370 
2371   Label resolved, clinit_barrier_slow;
2372 
2373   Bytecodes::Code code = bytecode();
2374   __ load_method_entry(Rcache, index);
2375   switch(byte_no) {
2376     case f1_byte:
2377       __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
2378       break;
2379     case f2_byte:
2380       __ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
2381       break;
2382   }
2383   // Load-acquire the bytecode to match store-release in InterpreterRuntime
2384   __ ldarb(temp, temp);
2385   __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
2386   __ br(Assembler::EQ, resolved);
2387 
2388   // resolve first time through
2389   // Class initialization barrier slow path lands here as well.
2390   __ bind(clinit_barrier_slow);
2391   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2392   __ mov(temp, (int) code);
2393   __ call_VM(noreg, entry, temp);
2394 
2395   // Update registers with resolved info
2396   __ load_method_entry(Rcache, index);
2397   // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
2398   // so all clients of this method must be modified accordingly
2399   __ bind(resolved);
2400 
2401   // Class initialization barrier for static methods
2402   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2403     __ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
2404     __ load_method_holder(temp, temp);
2405     __ clinit_barrier(temp, rscratch1, nullptr, &clinit_barrier_slow);
2406   }
2407 }
2408 
2409 void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
2410                                             Register Rcache,
2411                                             Register index) {
2412   const Register temp = r19;
2413   assert_different_registers(Rcache, index, temp);
2414 
2415   Label resolved;
2416 
2417   Bytecodes::Code code = bytecode();
2418   switch (code) {
2419   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2420   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2421   default: break;
2422   }
2423 
2424   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2425   __ load_field_entry(Rcache, index);
2426   if (byte_no == f1_byte) {
2427     __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::get_code_offset())));
2428   } else {
2429     __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::put_code_offset())));
2430   }
2431   // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in()
2432   __ ldarb(temp, temp);
2433   __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
2434   __ br(Assembler::EQ, resolved);
2435 
2436   // resolve first time through
2437   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2438   __ mov(temp, (int) code);
2439   __ call_VM(noreg, entry, temp);
2440 
2441   // Update registers with resolved info
2442   __ load_field_entry(Rcache, index);
2443   __ bind(resolved);
2444 }
2445 
2446 void TemplateTable::load_resolved_field_entry(Register obj,
2447                                               Register cache,
2448                                               Register tos_state,
2449                                               Register offset,
2450                                               Register flags,
2451                                               bool is_static = false) {
2452   assert_different_registers(cache, tos_state, flags, offset);
2453 
2454   // Field offset
2455   __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
2456 
2457   // Flags
2458   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())));
2459 
2460   // TOS state
2461   if (tos_state != noreg) {
2462     __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())));
2463   }
2464 
2465   // For static fields, overwrite obj with the field holder's java mirror
2466   if (is_static) {
2467     __ ldr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()));
2468     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2469     __ ldr(obj, Address(obj, mirror_offset));
2470     __ resolve_oop_handle(obj, r5, rscratch2);
2471   }
2472 }
2473 
2474 void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
2475                                                                  Register method,
2476                                                                  Register flags) {
2477 
2478   // setup registers
2479   const Register index = flags;
2480   assert_different_registers(method, cache, flags);
2481 
2482   // determine constant pool cache field offsets
2483   resolve_cache_and_index_for_method(f1_byte, cache, index);
2484   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2485   __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2486 }
2487 
2488 void TemplateTable::load_resolved_method_entry_handle(Register cache,
2489                                                       Register method,
2490                                                       Register ref_index,
2491                                                       Register flags) {
2492   // setup registers
2493   const Register index = ref_index;
2494   assert_different_registers(method, flags);
2495   assert_different_registers(method, cache, index);
2496 
2497   // determine constant pool cache field offsets
2498   resolve_cache_and_index_for_method(f1_byte, cache, index);
2499   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2500 
2501   // maybe push appendix to arguments (just before return address)
2502   Label L_no_push;
2503   __ tbz(flags, ResolvedMethodEntry::has_appendix_shift, L_no_push);
2504   // invokehandle uses an index into the resolved references array
2505   __ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
2506   // Push the appendix as a trailing parameter.
2507   // This must be done before we get the receiver,
2508   // since the parameter_size includes it.
2509   Register appendix = method;
2510   __ load_resolved_reference_at_index(appendix, ref_index);
2511   __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
2512   __ bind(L_no_push);
2513 
2514   __ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2515 }
2516 
2517 void TemplateTable::load_resolved_method_entry_interface(Register cache,
2518                                                          Register klass,
2519                                                          Register method_or_table_index,
2520                                                          Register flags) {
2521   // setup registers
2522   const Register index = method_or_table_index;
2523   assert_different_registers(method_or_table_index, cache, flags);
2524 
2525   // determine constant pool cache field offsets
2526   resolve_cache_and_index_for_method(f1_byte, cache, index);
2527   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2528 
2529   // Invokeinterface can behave in different ways:
2530   // If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
2531   // behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
2532   // vtable index is placed in the register.
2533   // Otherwise, the registers will be populated with the klass and method.
2534 
2535   Label NotVirtual; Label NotVFinal; Label Done;
2536   __ tbz(flags, ResolvedMethodEntry::is_forced_virtual_shift, NotVirtual);
2537   __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
2538   __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2539   __ b(Done);
2540 
2541   __ bind(NotVFinal);
2542   __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2543   __ b(Done);
2544 
2545   __ bind(NotVirtual);
2546   __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2547   __ ldr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
2548   __ bind(Done);
2549 }
2550 
2551 void TemplateTable::load_resolved_method_entry_virtual(Register cache,
2552                                                        Register method_or_table_index,
2553                                                        Register flags) {
2554   // setup registers
2555   const Register index = flags;
2556   assert_different_registers(method_or_table_index, cache, flags);
2557 
2558   // determine constant pool cache field offsets
2559   resolve_cache_and_index_for_method(f2_byte, cache, index);
2560   __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
2561 
2562   // method_or_table_index can either be an itable index or a method depending on the virtual final flag
2563   Label NotVFinal; Label Done;
2564   __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
2565   __ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
2566   __ b(Done);
2567 
2568   __ bind(NotVFinal);
2569   __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
2570   __ bind(Done);
2571 }
2572 
2573 // The rmethod register is input and overwritten to be the adapter method for the
2574 // indy call. Link Register (lr) is set to the return address for the adapter and
2575 // an appendix may be pushed to the stack. Registers r0-r3 are clobbered
2576 void TemplateTable::load_invokedynamic_entry(Register method) {
2577   // setup registers
2578   const Register appendix = r0;
2579   const Register cache = r2;
2580   const Register index = r3;
2581   assert_different_registers(method, appendix, cache, index, rcpool);
2582 
2583   __ save_bcp();
2584 
2585   Label resolved;
2586 
2587   __ load_resolved_indy_entry(cache, index);
2588   // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
2589   __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2590   __ ldar(method, method);
2591 
2592   // A non-null method means the entry has already been resolved
2593   __ cbnz(method, resolved);
2594 
2595   Bytecodes::Code code = bytecode();
2596 
2597   // Call to the interpreter runtime to resolve invokedynamic
2598   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2599   __ mov(method, code); // this is essentially Bytecodes::_invokedynamic
2600   __ call_VM(noreg, entry, method);
2601   // Update registers with resolved info
2602   __ load_resolved_indy_entry(cache, index);
2603   // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in()
2604   __ lea(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
2605   __ ldar(method, method);
2606 
2607 #ifdef ASSERT
2608   __ cbnz(method, resolved);
2609   __ stop("Should be resolved by now");
2610 #endif // ASSERT
2611   __ bind(resolved);
2612 
2613   Label L_no_push;
2614   // Check if there is an appendix
2615   __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
2616   __ tbz(index, ResolvedIndyEntry::has_appendix_shift, L_no_push);
2617 
2618   // Get appendix
2619   __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
2620   // Push the appendix as a trailing parameter
2621   // since the parameter_size includes it.
2622   __ push(method);
2623   __ mov(method, index);
2624   __ load_resolved_reference_at_index(appendix, method);
2625   __ verify_oop(appendix);
2626   __ pop(method);
2627   __ push(appendix);  // push appendix (MethodType, CallSite, etc.)
2628   __ bind(L_no_push);
2629 
2630   // compute return type
2631   __ load_unsigned_byte(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
2632   // load return address
2633   // The return address is loaded into the link register (lr) rather than
2634   // pushed to the stack as on x86
2635   {
2636     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2637     __ mov(rscratch1, table_addr);
2638     __ ldr(lr, Address(rscratch1, index, Address::lsl(3)));
2639   }
2640 }
2641 
2642 // The registers cache and index are expected to be set before the call.
2643 // Correct values of the cache and index registers are preserved.
2644 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2645                                             bool is_static, bool has_tos) {
2646   // do the JVMTI work here to avoid disturbing the register state below
2647   // We use c_rarg registers here because they are the registers used for
2648   // the call to the VM
2649   if (JvmtiExport::can_post_field_access()) {
2650     // Check to see if a field access watch has been set before we
2651     // take the time to call into the VM.
2652     Label L1;
2653     assert_different_registers(cache, index, r0);
2654     __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2655     __ ldrw(r0, Address(rscratch1));
2656     __ cbzw(r0, L1);
2657 
2658     __ load_field_entry(c_rarg2, index);
2659 
2660     if (is_static) {
2661       __ mov(c_rarg1, zr); // null object reference
2662     } else {
2663       __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
2664       __ verify_oop(c_rarg1);
2665     }
2666     // c_rarg1: object pointer or null
2667     // c_rarg2: cache entry pointer
2668     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2669                                        InterpreterRuntime::post_field_access),
2670                c_rarg1, c_rarg2);
2671     __ load_field_entry(cache, index);
2672     __ bind(L1);
2673   }
2674 }
2675 
2676 void TemplateTable::pop_and_check_object(Register r)
2677 {
2678   __ pop_ptr(r);
2679   __ null_check(r);  // for field access must check obj.
2680   __ verify_oop(r);
2681 }
2682 
2683 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2684 {
2685   const Register cache     = r2;
2686   const Register obj       = r4;
2687   const Register klass     = r5;
2688   const Register inline_klass = r7;
2689   const Register field_index = r23;
2690   const Register index     = r3;
2691   const Register tos_state = r3;
2692   const Register off       = r19;
2693   const Register flags     = r6;
2694   const Register bc        = r4; // uses same reg as obj, so don't mix them
2695 
2696   resolve_cache_and_index_for_field(byte_no, cache, index);
2697   jvmti_post_field_access(cache, index, is_static, false);
2698 
2699   // Valhalla extras
2700   __ load_unsigned_short(field_index, Address(cache, in_bytes(ResolvedFieldEntry::field_index_offset())));
2701   __ ldr(klass, Address(cache, ResolvedFieldEntry::field_holder_offset()));
2702 
2703   load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
2704 
2705   if (!is_static) {
2706     // obj is on the stack
2707     pop_and_check_object(obj);
2708   }
2709 
2710   // 8179954: We need to make sure that the code generated for
2711   // volatile accesses forms a sequentially-consistent set of
2712   // operations when combined with STLR and LDAR.  Without a leading
2713   // membar it's possible for a simple Dekker test to fail if loads
2714   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
2715   // the stores in one method and we interpret the loads in another.
2716   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()){
2717     Label notVolatile;
2718     __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2719     __ membar(MacroAssembler::AnyAny);
2720     __ bind(notVolatile);
2721   }
2722 
2723   const Address field(obj, off);
2724 
2725   Label Done, notByte, notBool, notInt, notShort, notChar,
2726               notLong, notFloat, notObj, notDouble;
2727 
2728   assert(btos == 0, "change code, btos != 0");
2729   __ cbnz(tos_state, notByte);
2730 
2731   // Don't rewrite getstatic, only getfield
2732   if (is_static) rc = may_not_rewrite;
2733 
2734   // btos
2735   __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
2736   __ push(btos);
2737   // Rewrite bytecode to be faster
2738   if (rc == may_rewrite) {
2739     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2740   }
2741   __ b(Done);
2742 
2743   __ bind(notByte);
2744   __ cmp(tos_state, (u1)ztos);
2745   __ br(Assembler::NE, notBool);
2746 
2747   // ztos (same code as btos)
2748   __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
2749   __ push(ztos);
2750   // Rewrite bytecode to be faster
2751   if (rc == may_rewrite) {
2752     // use btos rewriting; no truncation to the t/f bit is needed for getfield.
2753     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2754   }
2755   __ b(Done);
2756 
2757   __ bind(notBool);
2758   __ cmp(tos_state, (u1)atos);
2759   __ br(Assembler::NE, notObj);
2760   // atos
2761   if (!EnableValhalla) {
2762     do_oop_load(_masm, field, r0, IN_HEAP);
2763     __ push(atos);
2764     if (rc == may_rewrite) {
2765       patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2766     }
2767     __ b(Done);
2768   } else { // Valhalla
2769     if (is_static) {
2770       __ load_heap_oop(r0, field, rscratch1, rscratch2);
2771       Label is_null_free_inline_type, uninitialized;
2772       // An exception is thrown below if the static field has not been initialized yet
2773       __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_null_free_inline_type);
2774         // field is not a null free inline type
2775         __ push(atos);
2776         __ b(Done);
2777       // field is a null free inline type, must not return null even if uninitialized
2778       __ bind(is_null_free_inline_type);
2779         __ cbz(r0, uninitialized);
2780           __ push(atos);
2781           __ b(Done);
2782         __ bind(uninitialized);
2783           __ b(ExternalAddress(Interpreter::_throw_NPE_UninitializedField_entry));
2784     } else {
2785       Label is_flat, nonnull, is_inline_type, has_null_marker, rewrite_inline;
2786       __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_inline_type);
2787       __ test_field_has_null_marker(flags, noreg /*temp*/, has_null_marker);
2788         // Non-inline field case
2789         __ load_heap_oop(r0, field, rscratch1, rscratch2);
2790         __ push(atos);
2791         if (rc == may_rewrite) {
2792           patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2793         }
2794         __ b(Done);
2795       __ bind(is_inline_type);
2796         __ test_field_is_flat(flags, noreg /* temp */, is_flat);
2797          // field is not flat
2798           __ load_heap_oop(r0, field, rscratch1, rscratch2);
2799           __ cbnz(r0, nonnull);
2800             __ b(ExternalAddress(Interpreter::_throw_NPE_UninitializedField_entry));
2801           __ bind(nonnull);
2802           __ verify_oop(r0);
2803           __ push(atos);
2804           __ b(rewrite_inline);
2805         __ bind(is_flat);
2806         // field is flat
2807           __ mov(r0, obj);
2808           __ read_flat_field(cache, field_index, off, inline_klass /* temp */, r0);
2809           __ verify_oop(r0);
2810           __ push(atos);
2811           __ b(rewrite_inline);
2812         __ bind(has_null_marker);
2813           call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_nullable_flat_field), obj, cache);
2814           __ verify_oop(r0);
2815           __ push(atos);
2816       __ bind(rewrite_inline);
2817       if (rc == may_rewrite) {
2818         patch_bytecode(Bytecodes::_fast_vgetfield, bc, r1);
2819       }
2820       __ b(Done);
2821     }
2822   }
2823 
2824   __ bind(notObj);
2825   __ cmp(tos_state, (u1)itos);
2826   __ br(Assembler::NE, notInt);
2827   // itos
2828   __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
2829   __ push(itos);
2830   // Rewrite bytecode to be faster
2831   if (rc == may_rewrite) {
2832     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2833   }
2834   __ b(Done);
2835 
2836   __ bind(notInt);
2837   __ cmp(tos_state, (u1)ctos);
2838   __ br(Assembler::NE, notChar);
2839   // ctos
2840   __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
2841   __ push(ctos);
2842   // Rewrite bytecode to be faster
2843   if (rc == may_rewrite) {
2844     patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
2845   }
2846   __ b(Done);
2847 
2848   __ bind(notChar);
2849   __ cmp(tos_state, (u1)stos);
2850   __ br(Assembler::NE, notShort);
2851   // stos
2852   __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
2853   __ push(stos);
2854   // Rewrite bytecode to be faster
2855   if (rc == may_rewrite) {
2856     patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
2857   }
2858   __ b(Done);
2859 
2860   __ bind(notShort);
2861   __ cmp(tos_state, (u1)ltos);
2862   __ br(Assembler::NE, notLong);
2863   // ltos
2864   __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
2865   __ push(ltos);
2866   // Rewrite bytecode to be faster
2867   if (rc == may_rewrite) {
2868     patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
2869   }
2870   __ b(Done);
2871 
2872   __ bind(notLong);
2873   __ cmp(tos_state, (u1)ftos);
2874   __ br(Assembler::NE, notFloat);
2875   // ftos
2876   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2877   __ push(ftos);
2878   // Rewrite bytecode to be faster
2879   if (rc == may_rewrite) {
2880     patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
2881   }
2882   __ b(Done);
2883 
2884   __ bind(notFloat);
2885 #ifdef ASSERT
2886   __ cmp(tos_state, (u1)dtos);
2887   __ br(Assembler::NE, notDouble);
2888 #endif
2889   // dtos
2890   __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2891   __ push(dtos);
2892   // Rewrite bytecode to be faster
2893   if (rc == may_rewrite) {
2894     patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
2895   }
2896 #ifdef ASSERT
2897   __ b(Done);
2898 
2899   __ bind(notDouble);
2900   __ stop("Bad state");
2901 #endif
2902 
2903   __ bind(Done);
2904 
2905   Label notVolatile;
2906   __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2907   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2908   __ bind(notVolatile);
2909 }
2910 
2911 
2912 void TemplateTable::getfield(int byte_no)
2913 {
2914   getfield_or_static(byte_no, false);
2915 }
2916 
2917 void TemplateTable::nofast_getfield(int byte_no) {
2918   getfield_or_static(byte_no, false, may_not_rewrite);
2919 }
2920 
2921 void TemplateTable::getstatic(int byte_no)
2922 {
2923   getfield_or_static(byte_no, true);
2924 }
2925 
2926 // The registers cache and index are expected to be set before the call.
2927 // The function may destroy various registers, just not the cache and index registers.
2928 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2929   transition(vtos, vtos);
2930 
2931   if (JvmtiExport::can_post_field_modification()) {
2932     // Check to see if a field modification watch has been set before
2933     // we take the time to call into the VM.
2934     Label L1;
2935     assert_different_registers(cache, index, r0);
2936     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2937     __ ldrw(r0, Address(rscratch1));
2938     __ cbz(r0, L1);
2939 
2940     __ mov(c_rarg2, cache);
2941 
2942     if (is_static) {
2943       // Life is simple.  Null out the object pointer.
2944       __ mov(c_rarg1, zr);
2945     } else {
2946       // Life is harder. The stack holds the value on top, followed by
2947       // the object.  We don't know the size of the value, though; it
2948       // could be one or two words depending on its type. As a result,
2949       // we must find the type to determine where the object is.
2950       __ load_unsigned_byte(c_rarg3, Address(c_rarg2, in_bytes(ResolvedFieldEntry::type_offset())));
2951       Label nope2, done, ok;
2952       __ ldr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
2953       __ cmpw(c_rarg3, ltos);
2954       __ br(Assembler::EQ, ok);
2955       __ cmpw(c_rarg3, dtos);
2956       __ br(Assembler::NE, nope2);
2957       __ bind(ok);
2958       __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2959       __ bind(nope2);
2960     }
2961     // object (tos)
2962     __ mov(c_rarg3, esp);
2963     // c_rarg1: object pointer set up above (null if static)
2964     // c_rarg2: cache entry pointer
2965     // c_rarg3: jvalue object on the stack
2966     __ call_VM(noreg,
2967                CAST_FROM_FN_PTR(address,
2968                                 InterpreterRuntime::post_field_modification),
2969                c_rarg1, c_rarg2, c_rarg3);
2970     __ load_field_entry(cache, index);
2971     __ bind(L1);
2972   }
2973 }
2974 
2975 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2976   transition(vtos, vtos);
2977 
2978   const Register cache     = r2;
2979   const Register index     = r3;
2980   const Register tos_state = r3;
2981   const Register obj       = r2;
2982   const Register off       = r19;
2983   const Register flags     = r6;
2984   const Register bc        = r4;
2985   const Register inline_klass = r5;
2986 
2987   resolve_cache_and_index_for_field(byte_no, cache, index);
2988   jvmti_post_field_mod(cache, index, is_static);
2989   load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static);
2990 
2991   Label Done;
2992   {
2993     Label notVolatile;
2994     __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
2995     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2996     __ bind(notVolatile);
2997   }
2998 
2999   // field address
3000   const Address field(obj, off);
3001 
3002   Label notByte, notBool, notInt, notShort, notChar,
3003         notLong, notFloat, notObj, notDouble;
3004 
3005   assert(btos == 0, "change code, btos != 0");
3006   __ cbnz(tos_state, notByte);
3007 
3008   // Don't rewrite putstatic, only putfield
3009   if (is_static) rc = may_not_rewrite;
3010 
3011   // btos
3012   {
3013     __ pop(btos);
3014     if (!is_static) pop_and_check_object(obj);
3015     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
3016     if (rc == may_rewrite) {
3017       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
3018     }
3019     __ b(Done);
3020   }
3021 
3022   __ bind(notByte);
3023   __ cmp(tos_state, (u1)ztos);
3024   __ br(Assembler::NE, notBool);
3025 
3026   // ztos
3027   {
3028     __ pop(ztos);
3029     if (!is_static) pop_and_check_object(obj);
3030     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
3031     if (rc == may_rewrite) {
3032       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
3033     }
3034     __ b(Done);
3035   }
3036 
3037   __ bind(notBool);
3038   __ cmp(tos_state, (u1)atos);
3039   __ br(Assembler::NE, notObj);
3040 
3041   // atos
3042   {
3043      if (!EnableValhalla) {
3044       __ pop(atos);
3045       if (!is_static) pop_and_check_object(obj);
3046       // Store into the field
3047       do_oop_store(_masm, field, r0, IN_HEAP);
3048       if (rc == may_rewrite) {
3049         patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
3050       }
3051       __ b(Done);
3052      } else { // Valhalla
3053       __ pop(atos);
3054       if (is_static) {
3055         Label is_inline_type;
3056          __ test_field_is_not_null_free_inline_type(flags, noreg /* temp */, is_inline_type);
3057          __ null_check(r0);
3058          __ bind(is_inline_type);
3059          do_oop_store(_masm, field, r0, IN_HEAP);
3060          __ b(Done);
3061       } else {
3062         Label is_inline_type, is_flat, has_null_marker, rewrite_not_inline, rewrite_inline;
3063         __ test_field_is_null_free_inline_type(flags, noreg /*temp*/, is_inline_type);
3064         __ test_field_has_null_marker(flags, noreg /*temp*/, has_null_marker);
3065         // Not an inline type
3066         pop_and_check_object(obj);
3067         // Store into the field
3068         do_oop_store(_masm, field, r0, IN_HEAP);
3069         __ bind(rewrite_not_inline);
3070         if (rc == may_rewrite) {
3071           patch_bytecode(Bytecodes::_fast_aputfield, bc, r19, true, byte_no);
3072         }
3073         __ b(Done);
3074         // Implementation of the inline type semantic
3075         __ bind(is_inline_type);
3076         __ null_check(r0);
3077         __ test_field_is_flat(flags, noreg /*temp*/, is_flat);
3078         // field is not flat
3079         pop_and_check_object(obj);
3080         // Store into the field
3081         do_oop_store(_masm, field, r0, IN_HEAP);
3082         __ b(rewrite_inline);
3083         __ bind(is_flat);
3084         __ load_field_entry(cache, index); // reload field entry (cache) because it was erased by tos_state
3085         __ load_unsigned_short(index, Address(cache, in_bytes(ResolvedFieldEntry::field_index_offset())));
3086         __ ldr(r2, Address(cache, in_bytes(ResolvedFieldEntry::field_holder_offset())));
3087         __ inline_layout_info(r2, index, r6);
3088         pop_and_check_object(obj);
3089         __ load_klass(inline_klass, r0);
3090         __ payload_address(r0, r0, inline_klass);
3091         __ add(obj, obj, off);
3092         // because we use InlineLayoutInfo, we need special value access code specialized for fields (arrays will need a different API)
3093         __ flat_field_copy(IN_HEAP, r0, obj, r6);
3094         __ b(rewrite_inline);
3095         __ bind(has_null_marker);
3096         assert_different_registers(r0, cache, r19);
3097         pop_and_check_object(r19);
3098         __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_nullable_flat_field), r19, r0, cache);
3099         __ bind(rewrite_inline);
3100         if (rc == may_rewrite) {
3101           patch_bytecode(Bytecodes::_fast_vputfield, bc, r19, true, byte_no);
3102         }
3103         __ b(Done);
3104       }
3105      }  // Valhalla
3106   }
3107 
3108   __ bind(notObj);
3109   __ cmp(tos_state, (u1)itos);
3110   __ br(Assembler::NE, notInt);
3111 
3112   // itos
3113   {
3114     __ pop(itos);
3115     if (!is_static) pop_and_check_object(obj);
3116     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
3117     if (rc == may_rewrite) {
3118       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
3119     }
3120     __ b(Done);
3121   }
3122 
3123   __ bind(notInt);
3124   __ cmp(tos_state, (u1)ctos);
3125   __ br(Assembler::NE, notChar);
3126 
3127   // ctos
3128   {
3129     __ pop(ctos);
3130     if (!is_static) pop_and_check_object(obj);
3131     __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
3132     if (rc == may_rewrite) {
3133       patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
3134     }
3135     __ b(Done);
3136   }
3137 
3138   __ bind(notChar);
3139   __ cmp(tos_state, (u1)stos);
3140   __ br(Assembler::NE, notShort);
3141 
3142   // stos
3143   {
3144     __ pop(stos);
3145     if (!is_static) pop_and_check_object(obj);
3146     __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
3147     if (rc == may_rewrite) {
3148       patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
3149     }
3150     __ b(Done);
3151   }
3152 
3153   __ bind(notShort);
3154   __ cmp(tos_state, (u1)ltos);
3155   __ br(Assembler::NE, notLong);
3156 
3157   // ltos
3158   {
3159     __ pop(ltos);
3160     if (!is_static) pop_and_check_object(obj);
3161     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
3162     if (rc == may_rewrite) {
3163       patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
3164     }
3165     __ b(Done);
3166   }
3167 
3168   __ bind(notLong);
3169   __ cmp(tos_state, (u1)ftos);
3170   __ br(Assembler::NE, notFloat);
3171 
3172   // ftos
3173   {
3174     __ pop(ftos);
3175     if (!is_static) pop_and_check_object(obj);
3176     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3177     if (rc == may_rewrite) {
3178       patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
3179     }
3180     __ b(Done);
3181   }
3182 
3183   __ bind(notFloat);
3184 #ifdef ASSERT
3185   __ cmp(tos_state, (u1)dtos);
3186   __ br(Assembler::NE, notDouble);
3187 #endif
3188 
3189   // dtos
3190   {
3191     __ pop(dtos);
3192     if (!is_static) pop_and_check_object(obj);
3193     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
3194     if (rc == may_rewrite) {
3195       patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
3196     }
3197   }
3198 
3199 #ifdef ASSERT
3200   __ b(Done);
3201 
3202   __ bind(notDouble);
3203   __ stop("Bad state");
3204 #endif
3205 
3206   __ bind(Done);
3207 
3208   {
3209     Label notVolatile;
3210     __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3211     __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
3212     __ bind(notVolatile);
3213   }
3214 }
3215 
3216 void TemplateTable::putfield(int byte_no)
3217 {
3218   putfield_or_static(byte_no, false);
3219 }
3220 
3221 void TemplateTable::nofast_putfield(int byte_no) {
3222   putfield_or_static(byte_no, false, may_not_rewrite);
3223 }
3224 
3225 void TemplateTable::putstatic(int byte_no) {
3226   putfield_or_static(byte_no, true);
3227 }
3228 
3229 void TemplateTable::jvmti_post_fast_field_mod() {
3230   if (JvmtiExport::can_post_field_modification()) {
3231     // Check to see if a field modification watch has been set before
3232     // we take the time to call into the VM.
3233     Label L2;
3234     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3235     __ ldrw(c_rarg3, Address(rscratch1));
3236     __ cbzw(c_rarg3, L2);
3237     __ pop_ptr(r19);                  // copy the object pointer from tos
3238     __ verify_oop(r19);
3239     __ push_ptr(r19);                 // put the object pointer back on tos
3240     // Save tos values before call_VM() clobbers them. Since we have
3241     // to do it for every data type, we use the saved values as the
3242     // jvalue object.
3243     switch (bytecode()) {          // load values into the jvalue object
3244     case Bytecodes::_fast_vputfield: //fall through
3245     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
3246     case Bytecodes::_fast_bputfield: // fall through
3247     case Bytecodes::_fast_zputfield: // fall through
3248     case Bytecodes::_fast_sputfield: // fall through
3249     case Bytecodes::_fast_cputfield: // fall through
3250     case Bytecodes::_fast_iputfield: __ push_i(r0); break;
3251     case Bytecodes::_fast_dputfield: __ push_d(); break;
3252     case Bytecodes::_fast_fputfield: __ push_f(); break;
3253     case Bytecodes::_fast_lputfield: __ push_l(r0); break;
3254 
3255     default:
3256       ShouldNotReachHere();
3257     }
3258     __ mov(c_rarg3, esp);             // points to jvalue on the stack
3259     // access constant pool cache entry
3260     __ load_field_entry(c_rarg2, r0);
3261     __ verify_oop(r19);
3262     // r19: object pointer copied above
3263     // c_rarg2: cache entry pointer
3264     // c_rarg3: jvalue object on the stack
3265     __ call_VM(noreg,
3266                CAST_FROM_FN_PTR(address,
3267                                 InterpreterRuntime::post_field_modification),
3268                r19, c_rarg2, c_rarg3);
3269 
3270     switch (bytecode()) {             // restore tos values
3271     case Bytecodes::_fast_vputfield: // fall through
3272     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
3273     case Bytecodes::_fast_bputfield: // fall through
3274     case Bytecodes::_fast_zputfield: // fall through
3275     case Bytecodes::_fast_sputfield: // fall through
3276     case Bytecodes::_fast_cputfield: // fall through
3277     case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
3278     case Bytecodes::_fast_dputfield: __ pop_d(); break;
3279     case Bytecodes::_fast_fputfield: __ pop_f(); break;
3280     case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
3281     default: break;
3282     }
3283     __ bind(L2);
3284   }
3285 }
3286 
3287 void TemplateTable::fast_storefield(TosState state)
3288 {
3289   transition(state, vtos);
3290 
3291   ByteSize base = ConstantPoolCache::base_offset();
3292 
3293   jvmti_post_fast_field_mod();
3294 
3295   // access constant pool cache
3296   __ load_field_entry(r2, r1);
3297 
3298   // R1: field offset, R2: field holder, R3: flags
3299   load_resolved_field_entry(r2, r2, noreg, r1, r3);
3300 
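       // If the field is volatile, a leading StoreStore|LoadStore barrier keeps
       // earlier memory accesses from being reordered after the field store.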
3301   {
3302     Label notVolatile;
3303     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3304     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
3305     __ bind(notVolatile);
3306   }
3307 
3308   Label notVolatile;
3309 
3310   // Get object from stack
3311   pop_and_check_object(r2);
3312 
3313   // field address
3314   const Address field(r2, r1);
3315 
3316   // access field
3317   switch (bytecode()) {
3318   case Bytecodes::_fast_vputfield:
3319    {
3320       Label is_flat, has_null_marker, done;
3321       __ test_field_has_null_marker(r3, noreg /* temp */, has_null_marker);
3322       __ null_check(r0);
3323       __ test_field_is_flat(r3, noreg /* temp */, is_flat);
3324       // field is not flat
3325       do_oop_store(_masm, field, r0, IN_HEAP);
3326       __ b(done);
3327       __ bind(is_flat);
3328       // field is flat
3329       __ load_field_entry(r4, r3);
3330       __ load_unsigned_short(r3, Address(r4, in_bytes(ResolvedFieldEntry::field_index_offset())));
3331       __ ldr(r4, Address(r4, in_bytes(ResolvedFieldEntry::field_holder_offset())));
3332       __ inline_layout_info(r4, r3, r5);
3333       __ load_klass(r4, r0);
3334       __ payload_address(r0, r0, r4);
3335       __ lea(rscratch1, field);
3336       __ flat_field_copy(IN_HEAP, r0, rscratch1, r5);
3337       __ b(done);
3338       __ bind(has_null_marker);
3339       __ load_field_entry(r4, r1);
3340       __ mov(r1, r2);
3341       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_nullable_flat_field), r1, r0, r4);
3342       __ bind(done);
3343     }
3344     break;
3345   case Bytecodes::_fast_aputfield:
3346     do_oop_store(_masm, field, r0, IN_HEAP);
3347     break;
3348   case Bytecodes::_fast_lputfield:
3349     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
3350     break;
3351   case Bytecodes::_fast_iputfield:
3352     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
3353     break;
3354   case Bytecodes::_fast_zputfield:
3355     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
3356     break;
3357   case Bytecodes::_fast_bputfield:
3358     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
3359     break;
3360   case Bytecodes::_fast_sputfield:
3361     __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
3362     break;
3363   case Bytecodes::_fast_cputfield:
3364     __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
3365     break;
3366   case Bytecodes::_fast_fputfield:
3367     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3368     break;
3369   case Bytecodes::_fast_dputfield:
3370     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
3371     break;
3372   default:
3373     ShouldNotReachHere();
3374   }
3375 
3376   {
3377     Label notVolatile;
3378     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3379     __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
3380     __ bind(notVolatile);
3381   }
3382 }
3383 
3384 
3385 void TemplateTable::fast_accessfield(TosState state)
3386 {
3387   transition(atos, state);
3388   // Do the JVMTI work here to avoid disturbing the register state below
3389   if (JvmtiExport::can_post_field_access()) {
3390     // Check to see if a field access watch has been set before we
3391     // take the time to call into the VM.
3392     Label L1;
3393     __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3394     __ ldrw(r2, Address(rscratch1));
3395     __ cbzw(r2, L1);
3396     // access constant pool cache entry
3397     __ load_field_entry(c_rarg2, rscratch2);
3398     __ verify_oop(r0);
3399     __ push_ptr(r0);  // save object pointer before call_VM() clobbers it
3400     __ mov(c_rarg1, r0);
3401     // c_rarg1: object pointer copied above
3402     // c_rarg2: cache entry pointer
3403     __ call_VM(noreg,
3404                CAST_FROM_FN_PTR(address,
3405                                 InterpreterRuntime::post_field_access),
3406                c_rarg1, c_rarg2);
3407     __ pop_ptr(r0); // restore object pointer
3408     __ bind(L1);
3409   }
3410 
3411   // access constant pool cache
3412   __ load_field_entry(r2, r1);
3413 
3414   __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3415   __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
3416 
3417   // r0: object
3418   __ verify_oop(r0);
3419   __ null_check(r0);
3420   const Address field(r0, r1);
3421 
3422   // 8179954: We need to make sure that the code generated for
3423   // volatile accesses forms a sequentially-consistent set of
3424   // operations when combined with STLR and LDAR.  Without a leading
3425   // membar it's possible for a simple Dekker test to fail if loads
3426   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3427   // the stores in one method and we interpret the loads in another.
3428   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3429     Label notVolatile;
3430     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3431     __ membar(MacroAssembler::AnyAny);
3432     __ bind(notVolatile);
3433   }
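       // Rough sketch of the resulting volatile-load sequence in the interpreter:
       //   dmb ish      ; leading AnyAny barrier (only when C2/JVMCI may run)
       //   ldr  ...     ; plain field load below
       //   dmb ishld    ; trailing LoadLoad|LoadStore barrier at the end of this method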
3434 
3435   // access field
3436   switch (bytecode()) {
3437   case Bytecodes::_fast_vgetfield:
3438     {
3439       Register index = r4, klass = r5, inline_klass = r6, tmp = r7;
3440       Label is_flat, has_null_marker, nonnull, Done;
3441       __ test_field_has_null_marker(r3, noreg /*temp*/, has_null_marker);
3442       __ test_field_is_flat(r3, noreg /* temp */, is_flat);
3443         // field is not flat
3444         __ load_heap_oop(r0, field, rscratch1, rscratch2);
3445         __ cbnz(r0, nonnull);
3446           __ b(ExternalAddress(Interpreter::_throw_NPE_UninitializedField_entry));
3447         __ bind(nonnull);
3448         __ verify_oop(r0);
3449         __ b(Done);
3450       __ bind(is_flat);
3451       // field is flat
3452         __ load_unsigned_short(index, Address(r2, in_bytes(ResolvedFieldEntry::field_index_offset())));
3453         __ read_flat_field(r2, index, r1, tmp /* temp */, r0);
3454         __ verify_oop(r0);
3455         __ b(Done);
3456       __ bind(has_null_marker);
3457         call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_nullable_flat_field), r0, r2);
3458         __ verify_oop(r0);
3459       __ bind(Done);
3460     }
3461     break;
3462   case Bytecodes::_fast_agetfield:
3463     do_oop_load(_masm, field, r0, IN_HEAP);
3464     __ verify_oop(r0);
3465     break;
3466   case Bytecodes::_fast_lgetfield:
3467     __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
3468     break;
3469   case Bytecodes::_fast_igetfield:
3470     __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
3471     break;
3472   case Bytecodes::_fast_bgetfield:
3473     __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
3474     break;
3475   case Bytecodes::_fast_sgetfield:
3476     __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
3477     break;
3478   case Bytecodes::_fast_cgetfield:
3479     __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
3480     break;
3481   case Bytecodes::_fast_fgetfield:
3482     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3483     break;
3484   case Bytecodes::_fast_dgetfield:
3485     __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3486     break;
3487   default:
3488     ShouldNotReachHere();
3489   }
3490   {
3491     Label notVolatile;
3492     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3493     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3494     __ bind(notVolatile);
3495   }
3496 }
3497 
3498 void TemplateTable::fast_xaccess(TosState state)
3499 {
3500   transition(vtos, state);
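       // fast_xaccess handles the rewritten aload_0 + fast getfield pair: the
       // receiver is local 0 and the field entry index sits at bcp offset 2,
       // i.e. inside the following getfield bytecode.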
3501 
3502   // get receiver
3503   __ ldr(r0, aaddress(0));
3504   // access constant pool cache
3505   __ load_field_entry(r2, r3, 2);
3506   __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
3507 
3508   // 8179954: We need to make sure that the code generated for
3509   // volatile accesses forms a sequentially-consistent set of
3510   // operations when combined with STLR and LDAR.  Without a leading
3511   // membar it's possible for a simple Dekker test to fail if loads
3512   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3513   // the stores in one method and we interpret the loads in another.
3514   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3515     Label notVolatile;
3516     __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
3517     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3518     __ membar(MacroAssembler::AnyAny);
3519     __ bind(notVolatile);
3520   }
3521 
3522   // make sure exception is reported in correct bcp range (getfield is
3523   // next instruction)
3524   __ increment(rbcp);
3525   __ null_check(r0);
3526   switch (state) {
3527   case itos:
3528     __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3529     break;
3530   case atos:
3531     do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
3532     __ verify_oop(r0);
3533     break;
3534   case ftos:
3535     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3536     break;
3537   default:
3538     ShouldNotReachHere();
3539   }
3540 
3541   {
3542     Label notVolatile;
3543     __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
3544     __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile);
3545     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3546     __ bind(notVolatile);
3547   }
3548 
3549   __ decrement(rbcp);
3550 }
3551 
3552 
3553 
3554 //-----------------------------------------------------------------------------
3555 // Calls
3556 
3557 void TemplateTable::prepare_invoke(Register cache, Register recv) {
3558 
3559   Bytecodes::Code code = bytecode();
3560   const bool load_receiver       = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);
3561 
3562   // save 'interpreter return address'
3563   // save bcp (the interpreter's 'return address')
3564 
3565   // Load TOS state for later
3566   __ load_unsigned_byte(rscratch2, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));
3567 
3568   // load receiver if needed (note: no return address pushed yet)
3569   if (load_receiver) {
3570     __ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
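         // The receiver is the deepest (first pushed) argument, so it lives
         // num_parameters - 1 stack slots above esp; the next two instructions
         // compute that address and load it.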
3571     __ add(rscratch1, esp, recv, ext::uxtx, 3);
3572     __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
3573     __ verify_oop(recv);
3574   }
3575 
3576   // load return address
3577   {
3578     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3579     __ mov(rscratch1, table_addr);
3580     __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
3581   }
3582 }
3583 
3584 
3585 void TemplateTable::invokevirtual_helper(Register index,
3586                                          Register recv,
3587                                          Register flags)
3588 {
3589   // Uses temporary registers r0, r3
3590   assert_different_registers(index, recv, r0, r3);
3591   // Test for an invoke of a final method
3592   Label notFinal;
3593   __ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, notFinal);
3594 
3595   const Register method = index;  // method must be rmethod
3596   assert(method == rmethod,
3597          "Method must be rmethod for interpreter calling convention");
3598 
3599   // do the call - the index is actually the method to call
3600   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3601 
3602   // It's final, need a null check here!
3603   __ null_check(recv);
3604 
3605   // profile this call
3606   __ profile_final_call(r0);
3607   __ profile_arguments_type(r0, method, r4, true);
3608 
3609   __ jump_from_interpreted(method, r0);
3610 
3611   __ bind(notFinal);
3612 
3613   // get receiver klass
3614   __ load_klass(r0, recv);
3615 
3616   // profile this call
3617   __ profile_virtual_call(r0, rlocals, r3);
3618 
3619   // get target Method & entry point
3620   __ lookup_virtual_method(r0, index, method);
3621   __ profile_arguments_type(r3, method, r4, true);
3622   // FIXME -- this looks completely redundant. is it?
3623   // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
3624   __ jump_from_interpreted(method, r3);
3625 }
3626 
3627 void TemplateTable::invokevirtual(int byte_no)
3628 {
3629   transition(vtos, vtos);
3630   assert(byte_no == f2_byte, "use this argument");
3631 
3632   load_resolved_method_entry_virtual(r2,      // ResolvedMethodEntry*
3633                                      rmethod, // Method* or itable index
3634                                      r3);     // flags
3635   prepare_invoke(r2, r2); // recv
3636 
3637   // rmethod: index (actually a Method*)
3638   // r2: receiver
3639   // r3: flags
3640 
3641   invokevirtual_helper(rmethod, r2, r3);
3642 }
3643 
3644 void TemplateTable::invokespecial(int byte_no)
3645 {
3646   transition(vtos, vtos);
3647   assert(byte_no == f1_byte, "use this argument");
3648 
3649   load_resolved_method_entry_special_or_static(r2,      // ResolvedMethodEntry*
3650                                                rmethod, // Method*
3651                                                r3);     // flags
3652   prepare_invoke(r2, r2);  // get receiver also for null check
3653   __ verify_oop(r2);
3654   __ null_check(r2);
3655   // do the call
3656   __ profile_call(r0);
3657   __ profile_arguments_type(r0, rmethod, rbcp, false);
3658   __ jump_from_interpreted(rmethod, r0);
3659 }
3660 
3661 void TemplateTable::invokestatic(int byte_no)
3662 {
3663   transition(vtos, vtos);
3664   assert(byte_no == f1_byte, "use this argument");
3665 
3666   load_resolved_method_entry_special_or_static(r2,      // ResolvedMethodEntry*
3667                                                rmethod, // Method*
3668                                                r3);     // flags
3669   prepare_invoke(r2, r2);  // get receiver also for null check
3670 
3671   // do the call
3672   __ profile_call(r0);
3673   __ profile_arguments_type(r0, rmethod, r4, false);
3674   __ jump_from_interpreted(rmethod, r0);
3675 }
3676 
3677 void TemplateTable::fast_invokevfinal(int byte_no)
3678 {
3679   __ call_Unimplemented();
3680 }
3681 
3682 void TemplateTable::invokeinterface(int byte_no) {
3683   transition(vtos, vtos);
3684   assert(byte_no == f1_byte, "use this argument");
3685 
3686   load_resolved_method_entry_interface(r2,      // ResolvedMethodEntry*
3687                                        r0,      // Klass*
3688                                        rmethod, // Method* or itable/vtable index
3689                                        r3);     // flags
3690   prepare_invoke(r2, r2); // receiver
3691 
3692   // r0: interface klass (from f1)
3693   // rmethod: method (from f2)
3694   // r2: receiver
3695   // r3: flags
3696 
3697   // First check for Object case, then private interface method,
3698   // then regular interface method.
3699 
3700   // Special case of invokeinterface called for virtual method of
3701   // java.lang.Object.  See cpCache.cpp for details.
3702   Label notObjectMethod;
3703   __ tbz(r3, ResolvedMethodEntry::is_forced_virtual_shift, notObjectMethod);
3704 
3705   invokevirtual_helper(rmethod, r2, r3);
3706   __ bind(notObjectMethod);
3707 
3708   Label no_such_interface;
3709 
3710   // Check for private method invocation - indicated by vfinal
3711   Label notVFinal;
3712   __ tbz(r3, ResolvedMethodEntry::is_vfinal_shift, notVFinal);
3713 
3714   // Get receiver klass into r3
3715   __ load_klass(r3, r2);
3716 
3717   Label subtype;
3718   __ check_klass_subtype(r3, r0, r4, subtype);
3719   // If we get here the typecheck failed
3720   __ b(no_such_interface);
3721   __ bind(subtype);
3722 
3723   __ profile_final_call(r0);
3724   __ profile_arguments_type(r0, rmethod, r4, true);
3725   __ jump_from_interpreted(rmethod, r0);
3726 
3727   __ bind(notVFinal);
3728 
3729   // Get receiver klass into r3
3730   __ restore_locals();
3731   __ load_klass(r3, r2);
3732 
3733   Label no_such_method;
3734 
3735   // Preserve method for throw_AbstractMethodErrorVerbose.
3736   __ mov(r16, rmethod);
3737   // Receiver subtype check against REFC.
3738   // Superklass in r0. Subklass in r3. Blows rscratch2, r13
3739   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3740                              r3, r0, noreg,
3741                              // outputs: scan temp. reg, scan temp. reg
3742                              rscratch2, r13,
3743                              no_such_interface,
3744                              /*return_method=*/false);
3745 
3746   // profile this call
3747   __ profile_virtual_call(r3, r13, r19);
3748 
3749   // Get declaring interface class from method, and itable index
3750 
3751   __ load_method_holder(r0, rmethod);
3752   __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
3753   __ subw(rmethod, rmethod, Method::itable_index_max);
3754   __ negw(rmethod, rmethod);
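       // The Method stores its itable index in encoded form; the sub/neg above
       // recover the real index as itable_index_max - <encoded value>.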
3755 
3756   // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3757   __ mov(rlocals, r3);
3758   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3759                              rlocals, r0, rmethod,
3760                              // outputs: method, scan temp. reg
3761                              rmethod, r13,
3762                              no_such_interface);
3763 
3764   // rmethod: Method to call
3765   // r2: receiver
3766   // Check for abstract method error
3767   // Note: This should be done more efficiently via a throw_abstract_method_error
3768   //       interpreter entry point and a conditional jump to it in case of a null
3769   //       method.
3770   __ cbz(rmethod, no_such_method);
3771 
3772   __ profile_arguments_type(r3, rmethod, r13, true);
3773 
3774   // do the call
3775   // r2: receiver
3776   // rmethod: Method
3777   __ jump_from_interpreted(rmethod, r3);
3778   __ should_not_reach_here();
3779 
3780   // exception handling code follows...
3781   // note: must restore interpreter registers to canonical
3782   //       state for exception handling to work correctly!
3783 
3784   __ bind(no_such_method);
3785   // throw exception
3786   __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
3787   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3788   // Pass arguments for generating a verbose error message.
3789   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
3790   // the call_VM checks for exception, so we should never return here.
3791   __ should_not_reach_here();
3792 
3793   __ bind(no_such_interface);
3794   // throw exception
3795   __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
3796   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3797   // Pass arguments for generating a verbose error message.
3798   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3799                    InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
3800   // the call_VM checks for exception, so we should never return here.
3801   __ should_not_reach_here();
3802   return;
3803 }
3804 
3805 void TemplateTable::invokehandle(int byte_no) {
3806   transition(vtos, vtos);
3807   assert(byte_no == f1_byte, "use this argument");
3808 
3809   load_resolved_method_entry_handle(r2,      // ResolvedMethodEntry*
3810                                     rmethod, // Method*
3811                                     r0,      // Resolved reference
3812                                     r3);     // flags
3813   prepare_invoke(r2, r2);
3814 
3815   __ verify_method_ptr(r2);
3816   __ verify_oop(r2);
3817   __ null_check(r2);
3818 
3819   // FIXME: profile the LambdaForm also
3820 
3821   // r13 is safe to use here as a scratch reg because it is about to
3822   // be clobbered by jump_from_interpreted().
3823   __ profile_final_call(r13);
3824   __ profile_arguments_type(r13, rmethod, r4, true);
3825 
3826   __ jump_from_interpreted(rmethod, r0);
3827 }
3828 
3829 void TemplateTable::invokedynamic(int byte_no) {
3830   transition(vtos, vtos);
3831   assert(byte_no == f1_byte, "use this argument");
3832 
3833   load_invokedynamic_entry(rmethod);
3834 
3835   // r0: CallSite object (from cpool->resolved_references[])
3836   // rmethod: MH.linkToCallSite method
3837 
3838   // Note:  r0_callsite is already pushed
3839 
3840   // %%% should make a type profile for any invokedynamic that takes a ref argument
3841   // profile this call
3842   __ profile_call(rbcp);
3843   __ profile_arguments_type(r3, rmethod, r13, false);
3844 
3845   __ verify_oop(r0);
3846 
3847   __ jump_from_interpreted(rmethod, r0);
3848 }
3849 
3850 
3851 //-----------------------------------------------------------------------------
3852 // Allocation
3853 
3854 void TemplateTable::_new() {
3855   transition(vtos, atos);
3856 
3857   __ get_unsigned_2_byte_index_at_bcp(r3, 1);
3858   Label slow_case;
3859   Label done;
3860   Label initialize_header;
3861 
3862   __ get_cpool_and_tags(r4, r0);
3863   // Make sure the class we're about to instantiate has been resolved.
3864   // This is done before loading the InstanceKlass so that it is consistent with
3865   // the order in which the constant pool is updated (see ConstantPool::klass_at_put).
3866   const int tags_offset = Array<u1>::base_offset_in_bytes();
3867   __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
3868   __ lea(rscratch1, Address(rscratch1, tags_offset));
3869   __ ldarb(rscratch1, rscratch1);
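       // ldarb is a load-acquire: once the resolved tag is observed, the
       // resolved klass published by ConstantPool::klass_at_put is visible too.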
3870   __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
3871   __ br(Assembler::NE, slow_case);
3872 
3873   // get InstanceKlass
3874   __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);
3875 
3876   // make sure klass is initialized
3877   assert(VM_Version::supports_fast_class_init_checks(), "Optimization requires support for fast class initialization checks");
3878   __ clinit_barrier(r4, rscratch1, nullptr /*L_fast_path*/, &slow_case);
3879 
3880   __ allocate_instance(r4, r0, r3, r1, true, slow_case);
3881   if (DTraceAllocProbes) {
3882     // Trigger dtrace event for fastpath
3883     __ push(atos); // save the return value
3884     __ call_VM_leaf(
3885          CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), r0);
3886     __ pop(atos); // restore the return value
3887   }
3888 
3889   __ b(done);
3890 
3891   // slow case
3892   __ bind(slow_case);
3893   __ get_constant_pool(c_rarg1);
3894   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3895   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3896   __ verify_oop(r0);
3897 
3898   // continue
3899   __ bind(done);
3900   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3901   __ membar(Assembler::StoreStore);
3902 }
3903 
3904 void TemplateTable::newarray() {
3905   transition(itos, atos);
3906   __ load_unsigned_byte(c_rarg1, at_bcp(1));
3907   __ mov(c_rarg2, r0);
3908   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3909           c_rarg1, c_rarg2);
3910   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3911   __ membar(Assembler::StoreStore);
3912 }
3913 
3914 void TemplateTable::anewarray() {
3915   transition(itos, atos);
3916   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3917   __ get_constant_pool(c_rarg1);
3918   __ mov(c_rarg3, r0);
3919   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3920           c_rarg1, c_rarg2, c_rarg3);
3921   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3922   __ membar(Assembler::StoreStore);
3923 }
3924 
3925 void TemplateTable::arraylength() {
3926   transition(atos, itos);
3927   __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
3928 }
3929 
3930 void TemplateTable::checkcast()
3931 {
3932   transition(atos, atos);
3933   Label done, is_null, ok_is_subtype, quicked, resolved;
3934   __ cbz(r0, is_null);
3935 
3936   // Get cpool & tags index
3937   __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3938   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3939   // See if bytecode has already been quicked
3940   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3941   __ lea(r1, Address(rscratch1, r19));
3942   __ ldarb(r1, r1);
3943   __ cmp(r1, (u1)JVM_CONSTANT_Class);
3944   __ br(Assembler::EQ, quicked);
3945 
3946   __ push(atos); // save receiver for result, and for GC
3947   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3948   // vm_result_2 has metadata result
3949   __ get_vm_result_2(r0, rthread);
3950   __ pop(r3); // restore receiver
3951   __ b(resolved);
3952 
3953   // Get superklass in r0 and subklass in r3
3954   __ bind(quicked);
3955   __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
3956   __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
3957 
3958   __ bind(resolved);
3959   __ load_klass(r19, r3);
3960 
3961   // Generate subtype check.  Blows r2, r5.  Object in r3.
3962   // Superklass in r0.  Subklass in r19.
3963   __ gen_subtype_check(r19, ok_is_subtype);
3964 
3965   // Come here on failure
3966   __ push(r3);
3967   // object is at TOS
3968   __ b(Interpreter::_throw_ClassCastException_entry);
3969 
3970   // Come here on success
3971   __ bind(ok_is_subtype);
3972   __ mov(r0, r3); // Restore object from r3
3973 
3974   __ b(done);
3975   __ bind(is_null);
3976 
3977   // Collect counts on whether this test sees nulls a lot or not.
3978   if (ProfileInterpreter) {
3979     __ profile_null_seen(r2);
3980   }
3981 
3982   __ bind(done);
3983 }
3984 
3985 void TemplateTable::instanceof() {
3986   transition(atos, itos);
3987   Label done, is_null, ok_is_subtype, quicked, resolved;
3988   __ cbz(r0, is_null);
3989 
3990   // Get cpool & tags index
3991   __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3992   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3993   // See if bytecode has already been quicked
3994   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3995   __ lea(r1, Address(rscratch1, r19));
3996   __ ldarb(r1, r1);
3997   __ cmp(r1, (u1)JVM_CONSTANT_Class);
3998   __ br(Assembler::EQ, quicked);
3999 
4000   __ push(atos); // save receiver for result, and for GC
4001   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4002   // vm_result_2 has metadata result
4003   __ get_vm_result_2(r0, rthread);
4004   __ pop(r3); // restore receiver
4005   __ verify_oop(r3);
4006   __ load_klass(r3, r3);
4007   __ b(resolved);
4008 
4009   // Get superklass in r0 and subklass in r3
4010   __ bind(quicked);
4011   __ load_klass(r3, r0);
4012   __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);
4013 
4014   __ bind(resolved);
4015 
4016   // Generate subtype check.  Blows r2, r5
4017   // Superklass in r0.  Subklass in r3.
4018   __ gen_subtype_check(r3, ok_is_subtype);
4019 
4020   // Come here on failure
4021   __ mov(r0, 0);
4022   __ b(done);
4023   // Come here on success
4024   __ bind(ok_is_subtype);
4025   __ mov(r0, 1);
4026 
4027   // Collect counts on whether this test sees nulls a lot or not.
4028   if (ProfileInterpreter) {
4029     __ b(done);
4030     __ bind(is_null);
4031     __ profile_null_seen(r2);
4032   } else {
4033     __ bind(is_null);   // same as 'done'
4034   }
4035   __ bind(done);
4036   // r0 = 0: obj == nullptr or  obj is not an instanceof the specified klass
4037   // r0 = 1: obj != nullptr and obj is     an instanceof the specified klass
4038 }
4039 
4040 //-----------------------------------------------------------------------------
4041 // Breakpoints
4042 void TemplateTable::_breakpoint() {
4043   // Note: We get here even if we are single stepping.
4044   // jbug insists on setting breakpoints at every bytecode
4045   // even if we are in single step mode.
4046 
4047   transition(vtos, vtos);
4048 
4049   // get the unpatched byte code
4050   __ get_method(c_rarg1);
4051   __ call_VM(noreg,
4052              CAST_FROM_FN_PTR(address,
4053                               InterpreterRuntime::get_original_bytecode_at),
4054              c_rarg1, rbcp);
4055   __ mov(r19, r0);
4056 
4057   // post the breakpoint event
4058   __ call_VM(noreg,
4059              CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4060              rmethod, rbcp);
4061 
4062   // complete the execution of original bytecode
4063   __ mov(rscratch1, r19);
4064   __ dispatch_only_normal(vtos);
4065 }
4066 
4067 //-----------------------------------------------------------------------------
4068 // Exceptions
4069 
4070 void TemplateTable::athrow() {
4071   transition(atos, vtos);
4072   __ null_check(r0);
4073   __ b(Interpreter::throw_exception_entry());
4074 }
4075 
4076 //-----------------------------------------------------------------------------
4077 // Synchronization
4078 //
4079 // Note: monitorenter & exit are symmetric routines, which is reflected
4080 //       in the assembly code structure as well
4081 //
4082 // Stack layout:
4083 //
4084 // [expressions  ] <--- esp               = expression stack top
4085 // ..
4086 // [expressions  ]
4087 // [monitor entry] <--- monitor block top = expression stack bot
4088 // ..
4089 // [monitor entry]
4090 // [frame data   ] <--- monitor block bot
4091 // ...
4092 // [saved rfp    ] <--- rfp
4093 void TemplateTable::monitorenter()
4094 {
4095   transition(atos, vtos);
4096 
4097   // check for null object
4098   __ null_check(r0);
4099 
4100   Label is_inline_type;
4101   __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
4102   __ test_markword_is_inline_type(rscratch1, is_inline_type);
4103 
4104   const Address monitor_block_top(
4105         rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4106   const Address monitor_block_bot(
4107         rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
4108   const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
4109 
4110   Label allocated;
4111 
4112   // initialize entry pointer
4113   __ mov(c_rarg1, zr); // points to free slot or null
4114 
4115   // find a free slot in the monitor block (result in c_rarg1)
4116   {
4117     Label entry, loop, exit;
4118     __ ldr(c_rarg3, monitor_block_top); // derelativize pointer
4119     __ lea(c_rarg3, Address(rfp, c_rarg3, Address::lsl(Interpreter::logStackElementSize)));
4120     // c_rarg3 points to current entry, starting with top-most entry
4121 
4122     __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
4123 
4124     __ b(entry);
4125 
4126     __ bind(loop);
4127     // check if current entry is used
4128     // if not used then remember entry in c_rarg1
4129     __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset()));
4130     __ cmp(zr, rscratch1);
4131     __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
4132     // check if current entry is for same object
4133     __ cmp(r0, rscratch1);
4134     // if same object then stop searching
4135     __ br(Assembler::EQ, exit);
4136     // otherwise advance to next entry
4137     __ add(c_rarg3, c_rarg3, entry_size);
4138     __ bind(entry);
4139     // check if bottom reached
4140     __ cmp(c_rarg3, c_rarg2);
4141     // if not at bottom then check this entry
4142     __ br(Assembler::NE, loop);
4143     __ bind(exit);
4144   }
4145 
4146   __ cbnz(c_rarg1, allocated); // check if a slot has been found and
4147                                // if found, continue with that one
4148 
4149   // allocate one if there's no free slot
4150   {
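         // No free slot: grow the monitor area by one entry. The expression stack
         // is shifted down by entry_size so the new monitor entry can be carved
         // out just below the current monitor block bottom.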
4151     Label entry, loop;
4152     // 1. compute new pointers            // rsp: old expression stack top
4153 
4154     __ check_extended_sp();
4155     __ sub(sp, sp, entry_size);           // make room for the monitor
4156     __ sub(rscratch1, sp, rfp);
4157     __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
4158     __ str(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));
4159 
4160     __ ldr(c_rarg1, monitor_block_bot);   // derelativize pointer
4161     __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
4162     // c_rarg1 points to the old expression stack bottom
4163 
4164     __ sub(esp, esp, entry_size);         // move expression stack top
4165     __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
4166     __ mov(c_rarg3, esp);                 // set start value for copy loop
4167     __ sub(rscratch1, c_rarg1, rfp);      // relativize pointer
4168     __ asr(rscratch1, rscratch1, Interpreter::logStackElementSize);
4169     __ str(rscratch1, monitor_block_bot);  // set new monitor block bottom
4170 
4171     __ b(entry);
4172     // 2. move expression stack contents
4173     __ bind(loop);
4174     __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
4175                                                    // word from old location
4176     __ str(c_rarg2, Address(c_rarg3, 0));          // and store it at new location
4177     __ add(c_rarg3, c_rarg3, wordSize);            // advance to next word
4178     __ bind(entry);
4179     __ cmp(c_rarg3, c_rarg1);        // check if bottom reached
4180     __ br(Assembler::NE, loop);      // if not at bottom then
4181                                      // copy next word
4182   }
4183 
4184   // call run-time routine
4185   // c_rarg1: points to monitor entry
4186   __ bind(allocated);
4187 
4188   // Increment bcp to point to the next bytecode, so exception
4189   // handling for async. exceptions works correctly.
4190   // The object has already been popped from the stack, so the
4191   // expression stack looks correct.
4192   __ increment(rbcp);
4193 
4194   // store object
4195   __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset()));
4196   __ lock_object(c_rarg1);
4197 
4198   // check to make sure this monitor doesn't cause stack overflow after locking
4199   __ save_bcp();  // in case of exception
4200   __ generate_stack_overflow_check(0);
4201 
4202   // The bcp has already been incremented. Just need to dispatch to
4203   // next instruction.
4204   __ dispatch_next(vtos);
4205 
4206   __ bind(is_inline_type);
4207   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4208                     InterpreterRuntime::throw_identity_exception), r0);
4209   __ should_not_reach_here();
4210 }
4211 
4212 
4213 void TemplateTable::monitorexit()
4214 {
4215   transition(atos, vtos);
4216 
4217   // check for null object
4218   __ null_check(r0);
4219 
4220   const int is_inline_type_mask = markWord::inline_type_pattern;
4221   Label has_identity;
4222   __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
4223   __ mov(rscratch2, is_inline_type_mask);
4224   __ andr(rscratch1, rscratch1, rscratch2);
4225   __ cmp(rscratch1, rscratch2);
4226   __ br(Assembler::NE, has_identity);
4227   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4228                      InterpreterRuntime::throw_illegal_monitor_state_exception));
4229   __ should_not_reach_here();
4230   __ bind(has_identity);
4231 
4232   const Address monitor_block_top(
4233         rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4234   const Address monitor_block_bot(
4235         rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
4236   const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
4237 
4238   Label found;
4239 
4240   // find matching slot
4241   {
4242     Label entry, loop;
4243     __ ldr(c_rarg1, monitor_block_top); // derelativize pointer
4244     __ lea(c_rarg1, Address(rfp, c_rarg1, Address::lsl(Interpreter::logStackElementSize)));
4245     // c_rarg1 points to current entry, starting with top-most entry
4246 
4247     __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
4248                                         // of monitor block
4249     __ b(entry);
4250 
4251     __ bind(loop);
4252     // check if current entry is for same object
4253     __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset()));
4254     __ cmp(r0, rscratch1);
4255     // if same object then stop searching
4256     __ br(Assembler::EQ, found);
4257     // otherwise advance to next entry
4258     __ add(c_rarg1, c_rarg1, entry_size);
4259     __ bind(entry);
4260     // check if bottom reached
4261     __ cmp(c_rarg1, c_rarg2);
4262     // if not at bottom then check this entry
4263     __ br(Assembler::NE, loop);
4264   }
4265 
4266   // error handling. Unlocking was not block-structured
4267   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4268                    InterpreterRuntime::throw_illegal_monitor_state_exception));
4269   __ should_not_reach_here();
4270 
4271   // call run-time routine
4272   __ bind(found);
4273   __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
4274   __ unlock_object(c_rarg1);
4275   __ pop_ptr(r0); // discard object
4276 }
4277 
4278 
4279 // Wide instructions
4280 void TemplateTable::wide()
4281 {
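       // Dispatch to the wide variant of the following bytecode via the
       // Interpreter::_wentry_point table, indexed by that opcode.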
4282   __ load_unsigned_byte(r19, at_bcp(1));
4283   __ mov(rscratch1, (address)Interpreter::_wentry_point);
4284   __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
4285   __ br(rscratch1);
4286 }
4287 
4288 
4289 // Multi arrays
4290 void TemplateTable::multianewarray() {
4291   transition(vtos, atos);
4292   __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
4293   // last dim is on top of stack; we want address of first one:
4294   // first_addr = last_addr + (ndims - 1) * wordSize
4295   __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
4296   __ sub(c_rarg1, c_rarg1, wordSize);
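       // e.g. for ndims == 2 the first dimension is at esp + wordSize and the
       // last (top of stack) dimension is at esp.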
4297   call_VM(r0,
4298           CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
4299           c_rarg1);
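       // r0 now holds the new array; reload ndims from the bytecode and pop the
       // dimension words off the expression stack.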
4300   __ load_unsigned_byte(r1, at_bcp(3));
4301   __ lea(esp, Address(esp, r1, Address::uxtw(3)));
4302 }