/*
 * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/powerOfTwo.hpp"

#define __ _masm->

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::lsl(3));
}

static inline Address laddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
  return Address(scratch, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  return laddress(r, scratch, _masm);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}
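
// Note on the helpers above: locals live at decreasing addresses below
// rlocals, so the register-indexed forms expect an already-negated index
// (locals_index below does the negation). Category-2 values (long/double)
// span two slots and are addressed through the higher-numbered one, which
// is why laddress(n) == iaddress(n + 1) and why the register forms apply
// the Interpreter::local_offset_in_bytes(1) adjustment.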

static inline Address at_rsp() {
  return Address(esp, 0);
}

// At top of the Java expression stack, which may differ from esp();
// it does not for category 1 values.
static inline Address at_tos   () {
  return Address(esp,  Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(3));
}

static inline Address at_tos_p4() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(4));
}

static inline Address at_tos_p5() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(5));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::NE;
  case TemplateTable::not_equal    : return Assembler::EQ;
  case TemplateTable::less         : return Assembler::GE;
  case TemplateTable::less_equal   : return Assembler::GT;
  case TemplateTable::greater      : return Assembler::LE;
  case TemplateTable::greater_equal: return Assembler::LT;
  }
  ShouldNotReachHere();
  return Assembler::EQ;
}
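
// j_not(cc) yields the hardware condition under which the Java comparison
// cc *fails*, so a branch template can jump to its not-taken path and fall
// through into the taken-branch code when cc holds, e.g.:
//
//   __ br(j_not(cc), not_taken);  // fall through when cc holds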


// Miscellaneous helper routines
// Store an oop (or NULL) at the Address described by dst.
// If val == noreg this means store a NULL.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators) {
  assert(val == noreg || val == r0, "parameter is just for looks");
  __ store_heap_oop(dst, val, r10, r11, r3, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators) {
  __ load_heap_oop(dst, src, r10, r11, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no)
{
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_qputfield:
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movw(bc_reg, bc);
      __ cbzw(temp_reg, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movw(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ load_unsigned_byte(temp_reg, at_bcp(0));
    __ cmpw(temp_reg, Bytecodes::_breakpoint);
    __ br(Assembler::NE, L_fast_patch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
  __ br(Assembler::EQ, L_okay);
  __ cmpw(temp_reg, bc_reg);
  __ br(Assembler::EQ, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ strb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null()
{
  transition(vtos, atos);
  __ mov(r0, 0);
}

void TemplateTable::iconst(int value)
{
  transition(vtos, itos);
  __ mov(r0, value);
}

void TemplateTable::lconst(int value)
{
  transition(vtos, ltos);
  __ mov(r0, value);
}

void TemplateTable::fconst(int value)
{
  transition(vtos, ftos);
  switch (value) {
  case 0:
    __ fmovs(v0, 0.0);
    break;
  case 1:
    __ fmovs(v0, 1.0);
    break;
  case 2:
    __ fmovs(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value)
{
  transition(vtos, dtos);
  switch (value) {
  case 0:
    __ fmovd(v0, 0.0);
    break;
  case 1:
    __ fmovd(v0, 1.0);
    break;
  case 2:
    __ fmovd(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush()
{
  transition(vtos, itos);
  __ load_signed_byte32(r0, at_bcp(1));
}

void TemplateTable::sipush()
{
  transition(vtos, itos);
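  // The operand is a big-endian s2 at bcp + 1: load it as a little-endian
  // u16, byte-swap the whole word (revw leaves the operand in bits 16..31),
  // then arithmetic-shift right by 16 to sign-extend it into place.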
  __ load_unsigned_short(r0, at_bcp(1));
  __ revw(r0, r0);
  __ asrw(r0, r0, 16);
}

void TemplateTable::ldc(LdcType type)
{
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (is_ldc_wide(type)) {
    __ get_unsigned_2_byte_index_at_bcp(r1, 1);
  } else {
    __ load_unsigned_byte(r1, at_bcp(1));
  }
  __ get_cpool_and_tags(r2, r0);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ add(r3, r1, tags_offset);
  __ lea(r3, Address(r0, r3));
  __ ldarb(r3, r3);
  __ andr(r3, r3, ~JVM_CONSTANT_QDescBit);
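  // ldarb is a load-acquire: the tag must be observed no earlier than the
  // constant-pool data it guards, since resolution publishes the resolved
  // entry before the tag is updated. JVM_CONSTANT_QDescBit marks a Q-type
  // (primitive class) descriptor in the Valhalla tag bytes; it is masked
  // off so the comparisons below see the underlying tag either way.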

  // unresolved class - get the resolved class
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
  __ br(Assembler::EQ, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
  __ br(Assembler::EQ, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmp(r3, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, notClass);

  __ bind(call_ldc);
  __ mov(c_rarg1, is_ldc_wide(type) ? 1 : 0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(r0);
  __ verify_oop(r0);
  __ b(Done);

  __ bind(notClass);
  __ cmp(r3, (u1)JVM_CONSTANT_Float);
  __ br(Assembler::NE, notFloat);
  // ftos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrs(v0, Address(r1, base_offset));
  __ push_f();
  __ b(Done);

  __ bind(notFloat);

  __ cmp(r3, (u1)JVM_CONSTANT_Integer);
  __ br(Assembler::NE, notInt);

  // itos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrw(r0, Address(r1, base_offset));
  __ push_i(r0);
  __ b(Done);

  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(LdcType type)
{
  transition(vtos, atos);

  Register result = r0;
  Register tmp = r1;
  Register rarg = r2;

  int index_size = is_ldc_wide(type) ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ cbnz(result, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ mov(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);

  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;

    // Stash null_sentinel address to get its value later
    __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
    __ ldr(tmp, Address(rarg));
    __ resolve_oop_handle(tmp, r5, rscratch2);
    __ cmpoop(result, tmp);
    __ br(Assembler::NE, notNull);
    __ mov(result, 0);  // NULL object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    // Safe to call with 0 result
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w()
{
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(r0, 1);

  __ get_cpool_and_tags(r1, r2);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ lea(r2, Address(r2, r0, Address::lsl(0)));
  __ load_unsigned_byte(r2, Address(r2, tags_offset));
  __ cmpw(r2, (int)JVM_CONSTANT_Double);
  __ br(Assembler::NE, notDouble);

  // dtos
  __ lea(r2, Address(r1, r0, Address::lsl(3)));
  __ ldrd(v0, Address(r2, base_offset));
  __ push_d();
  __ b(Done);

  __ bind(notDouble);
  __ cmpw(r2, (int)JVM_CONSTANT_Long);
  __ br(Assembler::NE, notLong);

  // ltos
  __ lea(r0, Address(r1, r0, Address::lsl(3)));
  __ ldr(r0, Address(r0, base_offset));
  __ push_l();
  __ b(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done)
{
  Register obj = r0;
  Register rarg = r1;
  Register flags = r2;
  Register off = r3;

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  __ mov(rarg, (int) bytecode());
  __ call_VM(obj, entry, rarg);

  __ get_vm_result_2(flags, rthread);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ mov(off, flags);
  __ andw(off, off, ConstantPoolCacheEntry::field_index_mask);

  const Address field(obj, off);

  // What sort of thing are we loading?
  // x86 uses a shift and mask, or wings it with a shift plus an assert
  // that the mask is not needed; aarch64 just uses a bitfield extract.
  __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,
           ConstantPoolCacheEntry::tos_state_bits);

  switch (bytecode()) {
    case Bytecodes::_ldc:
    case Bytecodes::_ldc_w:
      {
        // tos in (itos, ftos, stos, btos, ctos, ztos)
        Label notInt, notFloat, notShort, notByte, notChar, notBool;
        __ cmpw(flags, itos);
        __ br(Assembler::NE, notInt);
        // itos
        __ ldrw(r0, field);
        __ push(itos);
        __ b(Done);

        __ bind(notInt);
        __ cmpw(flags, ftos);
        __ br(Assembler::NE, notFloat);
        // ftos
        __ load_float(field);
        __ push(ftos);
        __ b(Done);

        __ bind(notFloat);
        __ cmpw(flags, stos);
        __ br(Assembler::NE, notShort);
        // stos
        __ load_signed_short(r0, field);
        __ push(stos);
        __ b(Done);

        __ bind(notShort);
        __ cmpw(flags, btos);
        __ br(Assembler::NE, notByte);
        // btos
        __ load_signed_byte(r0, field);
        __ push(btos);
        __ b(Done);

        __ bind(notByte);
        __ cmpw(flags, ctos);
        __ br(Assembler::NE, notChar);
        // ctos
        __ load_unsigned_short(r0, field);
        __ push(ctos);
        __ b(Done);

        __ bind(notChar);
        __ cmpw(flags, ztos);
        __ br(Assembler::NE, notBool);
        // ztos
        __ load_signed_byte(r0, field);
        __ push(ztos);
        __ b(Done);

        __ bind(notBool);
        break;
      }

    case Bytecodes::_ldc2_w:
      {
        Label notLong, notDouble;
        __ cmpw(flags, ltos);
        __ br(Assembler::NE, notLong);
        // ltos
        __ ldr(r0, field);
        __ push(ltos);
        __ b(Done);

        __ bind(notLong);
        __ cmpw(flags, dtos);
        __ br(Assembler::NE, notDouble);
        // dtos
        __ load_double(field);
        __ push(dtos);
        __ b(Done);

        __ bind(notDouble);
        break;
      }

    default:
      ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

void TemplateTable::locals_index(Register reg, int offset)
{
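  // The operand is an unsigned local index; it is negated because locals
  // sit at decreasing addresses below rlocals, and the register forms of
  // iaddress()/laddress() scale the (negated) index by wordSize.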
  __ ldrb(reg, at_bcp(offset));
  __ neg(reg, reg);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpw(r1, Bytecodes::_iload);
    __ br(Assembler::EQ, done);

    // if _fast_iload rewrite to _fast_iload2
    __ cmpw(r1, Bytecodes::_fast_iload);
    __ movw(bc, Bytecodes::_fast_iload2);
    __ br(Assembler::EQ, rewrite);

    // if _caload rewrite to _fast_icaload
    __ cmpw(r1, Bytecodes::_caload);
    __ movw(bc, Bytecodes::_fast_icaload);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_iload
    __ movw(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, r1, false);
    __ bind(done);
  }

  // do iload, get the local value into tos
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload2()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
  __ push(itos);
  locals_index(r1, 3);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::lload()
{
  transition(vtos, ltos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::fload()
{
  transition(vtos, ftos);
  locals_index(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::dload()
{
  transition(vtos, dtos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::aload()
{
  transition(vtos, atos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ ldrh(reg, at_bcp(2));
  __ rev16w(reg, reg);
  __ neg(reg, reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::wide_lload()
{
  transition(vtos, ltos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_fload()
{
  transition(vtos, ftos);
  locals_index_wide(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::wide_dload()
{
  transition(vtos, dtos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_aload()
{
  transition(vtos, atos);
  locals_index_wide(r1);
  __ ldr(r0, aaddress(r1));
}

void TemplateTable::index_check(Register array, Register index)
{
  // destroys r1, rscratch1
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  // __ movl2ptr(index, index);
  // check index
  Register length = rscratch1;
  __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ cmpw(index, length);
  if (index != r1) {
    // ??? convention: move aberrant index into r1 for exception message
    assert(r1 != array, "different registers");
    __ mov(r1, index);
  }
  Label ok;
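  // Unsigned comparison: a negative index wraps to a large unsigned value,
  // so a single LO (unsigned lower) test rejects both negative and
  // out-of-range indices.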
  __ br(Assembler::LO, ok);
  // ??? convention: move array into r3 for exception message
  __ mov(r3, array);
  __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  __ br(rscratch1);
  __ bind(ok);
}

void TemplateTable::iaload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
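  // The header offset is folded into the index: adding base_offset >> 2
  // before the uxtw(2)-scaled access below computes
  //   array + base_offset + index * 4
  // in a single addressing mode. The other array load/store templates use
  // the same trick with the element-size-appropriate shift.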
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::laload()
{
  transition(itos, ltos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::faload()
{
  transition(itos, ftos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::daload()
{
  transition(itos, dtos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::aaload()
{
  transition(itos, atos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ profile_array(r2, r0, r4);
  if (UseFlatArray) {
    Label is_flat_array, done;

    __ test_flattened_array_oop(r0, r8 /*temp*/, is_flat_array);
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);

    __ b(done);
    __ bind(is_flat_array);
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), r0, r1);
    __ bind(done);
  } else {
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
  }
  __ profile_element(r2, r0, r4);
}

void TemplateTable::baload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}

void TemplateTable::caload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload()
{
  transition(vtos, itos);
  // load index out of locals
  locals_index(r2);
  __ ldr(r1, iaddress(r2));

  __ pop_ptr(r0);

  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::saload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::iload(int n)
{
  transition(vtos, itos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::lload(int n)
{
  transition(vtos, ltos);
  __ ldr(r0, laddress(n));
}

void TemplateTable::fload(int n)
{
  transition(vtos, ftos);
  __ ldrs(v0, faddress(n));
}

void TemplateTable::dload(int n)
{
  transition(vtos, dtos);
  __ ldrd(v0, daddress(n));
}

void TemplateTable::aload(int n)
{
  transition(vtos, atos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpw(r1, Bytecodes::_getfield);
    __ br(Assembler::EQ, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_igetfield);
    __ movw(bc, Bytecodes::_fast_iaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_agetfield);
    __ movw(bc, Bytecodes::_fast_aaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_fgetfield);
    __ movw(bc, Bytecodes::_fast_faccess_0);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movw(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, r1, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore()
{
  transition(itos, vtos);
  locals_index(r1);
  // FIXME: We're being very pernickety here storing a jint in a
  // local with strw, which costs an extra instruction over what we'd
  // be able to do with a simple str.  We should just store the whole
  // word.
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::lstore()
{
  transition(ltos, vtos);
  locals_index(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::astore()
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(r1);
  __ lea(rscratch1, faddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index_wide(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg, noreg);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1:  index
  // r3:  array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1:  index
  // r3:  array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, is_flat_array, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ ldr(r0, at_tos());    // value
  __ ldr(r2, at_tos_p1()); // index
  __ ldr(r3, at_tos_p2()); // array

  index_check(r3, r2);     // kills r1

  __ profile_array(r4, r3, r5);
  __ profile_element(r4, r0, r5);

  __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
  // Be careful not to clobber r4 below

  // do array store check - check for NULL value first
  __ cbz(r0, is_null);

  // Move array class to r5
  __ load_klass(r5, r3);

  if (UseFlatArray) {
    __ ldrw(r6, Address(r5, Klass::layout_helper_offset()));
    __ test_flattened_array_layout(r6, is_flat_array);
  }

  // Move subklass into r1
  __ load_klass(r1, r0);

  // Move array element superklass into r0
  __ ldr(r0, Address(r5, ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register.  Frees r2.

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r1.

  // is "r1 <: r0" ? (value subclass <: array element superclass)
  __ gen_subtype_check(r1, ok_is_subtype, false);

  // Come here on failure
  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ ldr(r0, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, r0, IS_ARRAY);
  __ b(done);

  // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
  __ bind(is_null);
  if (EnablePrimitiveClasses) {
    Label is_null_into_value_array_npe, store_null;

    // No way to store null in flat null-free array
    __ test_null_free_array_oop(r3, r8, is_null_into_value_array_npe);
    __ b(store_null);

    __ bind(is_null_into_value_array_npe);
    __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));

    __ bind(store_null);
  }

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);
  __ b(done);

  if (UseFlatArray) {
    Label is_type_ok;
    __ bind(is_flat_array); // Store non-null value to flat

    // Simplistic type check...
    // r0 - value, r2 - index, r3 - array.

    // Profile the not-null value's klass.
    // Load value class
    __ load_klass(r1, r0);

    // Move element klass into r7
    __ ldr(r7, Address(r5, ArrayKlass::element_klass_offset()));

    // flat value array needs exact type match
    // is "r1 == r7" (value subclass == array element superclass)

    __ cmp(r7, r1);
    __ br(Assembler::EQ, is_type_ok);

    __ b(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

    __ bind(is_type_ok);
    // r1: value's klass
    // r3: array
    // r5: array klass
    __ test_klass_is_empty_inline_type(r1, r7, done);

    // calc dst for copy
    __ ldrw(r7, at_tos_p1()); // index
    __ data_for_value_array_index(r3, r5, r7, r7);

    // ...and src for copy
    __ ldr(r6, at_tos());  // value
    __ data_for_oop(r6, r6, r1);

    __ mov(r4, r1);  // Shuffle arguments to avoid conflict with c_rarg1
    __ access_value_copy(IN_HEAP, r6, r7, r4);
  }

  // Pop stack arguments
  __ bind(done);
  __ add(esp, esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(r2, r3);
  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
  int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
  Label L_skip;
  __ tbz(r2, diffbit_index, L_skip);
  __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg, noreg);
}

void TemplateTable::castore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg, noreg);
}

void TemplateTable::sastore()
{
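  // Identical to castore: both element types are 16 bits wide, and a
  // store (unlike a load) does not care about signedness.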
  castore();
}

void TemplateTable::istore(int n)
{
  transition(itos, vtos);
  __ str(r0, iaddress(n));
}

void TemplateTable::lstore(int n)
{
  transition(ltos, vtos);
  __ str(r0, laddress(n));
}

void TemplateTable::fstore(int n)
{
  transition(ftos, vtos);
  __ strs(v0, faddress(n));
}

void TemplateTable::dstore(int n)
{
  transition(dtos, vtos);
  __ strd(v0, daddress(n));
}

void TemplateTable::astore(int n)
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  __ str(r0, iaddress(n));
}

void TemplateTable::pop()
{
  transition(vtos, vtos);
  __ add(esp, esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2()
{
  transition(vtos, vtos);
  __ add(esp, esp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup()
{
  transition(vtos, vtos);
  __ ldr(r0, Address(esp, 0));
  __ push(r0);
  // stack: ..., a, a
}

void TemplateTable::dup_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos());  // load b
  __ ldr(r2, at_tos_p1());  // load a
  __ str(r0, at_tos_p1());  // store b
  __ str(r2, at_tos());  // store a
  __ push(r0);                  // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r0, at_tos());  // load c
  __ ldr(r2, at_tos_p2());  // load a
  __ str(r0, at_tos_p2());  // store c in a
  __ push(r0);      // push c
  // stack: ..., c, b, c, c
  __ ldr(r0, at_tos_p2());  // load b
  __ str(r2, at_tos_p2());  // store a in b
  // stack: ..., c, a, c, c
  __ str(r0, at_tos_p1());  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos_p1());  // load a
  __ push(r0);                  // push a
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);                  // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r2, at_tos());  // load c
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);                  // push b
  __ push(r2);                  // push c
  // stack: ..., a, b, c, b, c
  __ str(r2, at_tos_p3());  // store c in b
  // stack: ..., a, c, c, b, c
  __ ldr(r2, at_tos_p4());  // load a
  __ str(r2, at_tos_p2());  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ str(r0, at_tos_p4());  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ ldr(r2, at_tos());  // load d
  __ ldr(r0, at_tos_p1());  // load c
  __ push(r0);                  // push c
  __ push(r2);                  // push d
  // stack: ..., a, b, c, d, c, d
  __ ldr(r0, at_tos_p4());  // load b
  __ str(r0, at_tos_p2());  // store b in d
  __ str(r2, at_tos_p4());  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ ldr(r2, at_tos_p5());  // load a
  __ ldr(r0, at_tos_p3());  // load c
  __ str(r2, at_tos_p3());  // store a in c
  __ str(r0, at_tos_p5());  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r2, at_tos_p1());  // load a
  __ ldr(r0, at_tos());  // load b
  __ str(r2, at_tos());  // store a in b
  __ str(r0, at_tos_p1());  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op)
{
  transition(itos, itos);
  // r0 <== r1 op r0
  __ pop_i(r1);
  switch (op) {
  case add  : __ addw(r0, r1, r0); break;
  case sub  : __ subw(r0, r1, r0); break;
  case mul  : __ mulw(r0, r1, r0); break;
  case _and : __ andw(r0, r1, r0); break;
  case _or  : __ orrw(r0, r1, r0); break;
  case _xor : __ eorw(r0, r1, r0); break;
  case shl  : __ lslvw(r0, r1, r0); break;
  case shr  : __ asrvw(r0, r1, r0); break;
  case ushr : __ lsrvw(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op)
{
  transition(ltos, ltos);
  // r0 <== r1 op r0
  __ pop_l(r1);
  switch (op) {
  case add  : __ add(r0, r1, r0); break;
  case sub  : __ sub(r0, r1, r0); break;
  case mul  : __ mul(r0, r1, r0); break;
  case _and : __ andr(r0, r1, r0); break;
  case _or  : __ orr(r0, r1, r0); break;
  case _xor : __ eor(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 idiv r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::irem()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 irem r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lmul()
{
  transition(ltos, ltos);
  __ pop_l(r1);
  __ mul(r0, r0, r1);
}

void TemplateTable::ldiv()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 ldiv r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::lrem()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 lrem r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lshl()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lslv(r0, r1, r0);
}

void TemplateTable::lshr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ asrv(r0, r1, r0);
}

void TemplateTable::lushr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lsrv(r0, r1, r0);
}

void TemplateTable::fop2(Operation op)
{
  transition(ftos, ftos);
  switch (op) {
  case add:
    // n.b. use ldrd because this is a 64 bit slot
    __ pop_f(v1);
    __ fadds(v0, v1, v0);
    break;
  case sub:
    __ pop_f(v1);
    __ fsubs(v0, v1, v0);
    break;
  case mul:
    __ pop_f(v1);
    __ fmuls(v0, v1, v0);
    break;
  case div:
    __ pop_f(v1);
    __ fdivs(v0, v1, v0);
    break;
  case rem:
    __ fmovs(v1, v0);
    __ pop_f(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op)
{
  transition(dtos, dtos);
  switch (op) {
  case add:
    // n.b. use ldrd because this is a 64 bit slot
    __ pop_d(v1);
    __ faddd(v0, v1, v0);
    break;
  case sub:
    __ pop_d(v1);
    __ fsubd(v0, v1, v0);
    break;
  case mul:
    __ pop_d(v1);
    __ fmuld(v0, v1, v0);
    break;
  case div:
    __ pop_d(v1);
    __ fdivd(v0, v1, v0);
    break;
  case rem:
    __ fmovd(v1, v0);
    __ pop_d(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg()
{
  transition(itos, itos);
  __ negw(r0, r0);
}

void TemplateTable::lneg()
{
  transition(ltos, ltos);
  __ neg(r0, r0);
}

void TemplateTable::fneg()
{
  transition(ftos, ftos);
  __ fnegs(v0, v0);
}

void TemplateTable::dneg()
{
  transition(dtos, dtos);
  __ fnegd(v0, v0);
}

void TemplateTable::iinc()
{
  transition(vtos, vtos);
  __ load_signed_byte(r1, at_bcp(2)); // get constant
  locals_index(r2);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::wide_iinc()
{
  transition(vtos, vtos);
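  // The wide iinc operands are a big-endian u2 local index at bcp + 2
  // followed by a big-endian s2 constant at bcp + 4. A single 32-bit load
  // picks up both; rev16 byte-swaps within each halfword, leaving the
  // index in bits 0..15 (extracted and negated for locals addressing) and
  // the constant in bits 16..31 (sign-extended by sbfx).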
  // __ mov(r1, zr);
  __ ldrw(r1, at_bcp(2)); // get constant and index
  __ rev16(r1, r1);
  __ ubfx(r2, r1, 0, 16);
  __ neg(r2, r2);
  __ sbfx(r1, r1, 16, 16);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::convert()
{
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT
  // static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ sxtw(r0, r0);
    break;
  case Bytecodes::_i2f:
    __ scvtfws(v0, r0);
    break;
  case Bytecodes::_i2d:
    __ scvtfwd(v0, r0);
    break;
  case Bytecodes::_i2b:
    __ sxtbw(r0, r0);
    break;
  case Bytecodes::_i2c:
    __ uxthw(r0, r0);
    break;
  case Bytecodes::_i2s:
    __ sxthw(r0, r0);
    break;
  case Bytecodes::_l2i:
    __ uxtw(r0, r0);
    break;
  case Bytecodes::_l2f:
    __ scvtfs(v0, r0);
    break;
  case Bytecodes::_l2d:
    __ scvtfd(v0, r0);
    break;
  case Bytecodes::_f2i:
  {
    Label L_Okay;
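    // fcvtzsw saturates on overflow, but it also raises FPSR exception
    // flags (e.g. invalid-operation for a NaN input). If any flag is set
    // after the convert, punt to the shared runtime stub, which produces
    // the Java-mandated result. The f2l/d2i/d2l cases below follow the
    // same pattern.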
    __ clear_fpsr();
    __ fcvtzsw(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzs(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_f2d:
    __ fcvts(v0, v0);
    break;
  case Bytecodes::_d2i:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzdw(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzd(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_d2f:
    __ fcvtd(v0, v0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp()
{
  transition(ltos, itos);
  Label done;
  __ pop_l(r1);
  __ cmp(r1, r0);
  __ mov(r0, (uint64_t)-1L);
  __ br(Assembler::LT, done);
  // __ mov(r0, 1UL);
  // __ csel(r0, r0, zr, Assembler::NE);
  // and here is a faster way
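  // csinc Rd, Rn, Rm, cond computes Rd = cond ? Rn : Rm + 1, so this
  // yields 0 on EQ and zr + 1 == 1 otherwise; together with the -1 case
  // branched out above, that covers lcmp's full {-1, 0, 1} result.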
  __ csinc(r0, zr, zr, Assembler::EQ);
  __ bind(done);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result)
{
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(v1);
    __ fcmps(v1, v0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(v1);
    __ fcmpd(v1, v0);
  }
  if (unordered_result < 0) {
    // we want -1 for unordered or less than, 0 for equal and 1 for
    // greater than.
    __ mov(r0, (uint64_t)-1L);
    // for FP LT tests less than or unordered
    __ br(Assembler::LT, done);
    // install 0 for EQ otherwise 1
    __ csinc(r0, zr, zr, Assembler::EQ);
  } else {
    // we want -1 for less than, 0 for equal and 1 for unordered or
    // greater than.
    __ mov(r0, 1L);
    // for FP HI tests greater than or unordered
    __ br(Assembler::HI, done);
    // install 0 for EQ otherwise ~0
    __ csinv(r0, zr, zr, Assembler::EQ);
  }
  __ bind(done);
}

void TemplateTable::branch(bool is_jsr, bool is_wide)
{
  // We might be moving to a safepoint.  The thread which calls
  // Interpreter::notice_safepoints() will effectively flush its cache
  // when it makes a system call, but we need to do something to
  // ensure that we see the changed dispatch table.
  __ membar(MacroAssembler::LoadLoad);

  __ profile_taken_branch(r0, r1);
  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // load branch displacement
  if (!is_wide) {
1845     __ ldrh(r2, at_bcp(1));
1846     __ rev16(r2, r2);
1847     // sign extend the 16-bit value in r2
1848     __ sbfm(r2, r2, 0, 15);
1849   } else {
1850     __ ldrw(r2, at_bcp(1));
1851     __ revw(r2, r2);
1852     // sign extend the 32-bit value in r2
1853     __ sbfm(r2, r2, 0, 31);
1854   }
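
  // Equivalent decode in C (a sketch only, assuming the GCC/Clang
  // bswap intrinsics): the bytecode stream is big-endian, hence the
  // byte-reversal and sign-extension above.
  //
  //   int32_t disp = is_wide
  //       ? (int32_t)__builtin_bswap32(*(uint32_t*)(bcp + 1))
  //       : (int16_t)__builtin_bswap16(*(uint16_t*)(bcp + 1));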
1855 
1856   // Handle all the JSR stuff here, then exit.
1857   // It's much shorter and cleaner than intermingling with the non-JSR
1858   // normal-branch stuff occurring below.
1859 
1860   if (is_jsr) {
1861     // Pre-load the next target bytecode into rscratch1
1862     __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
1863     // compute return address as bci
1864     __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
1865     __ add(rscratch2, rscratch2,
1866            in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
1867     __ sub(r1, rbcp, rscratch2);
1868     __ push_i(r1);
1869     // Adjust the bcp by the displacement in r2 (16-bit for jsr, 32-bit for jsr_w)
1870     __ add(rbcp, rbcp, r2);
1871     __ dispatch_only(vtos, /*generate_poll*/true);
1872     return;
1873   }
1874 
1875   // Normal (non-jsr) branch handling
1876 
1877   // Adjust the bcp by the displacement in r2
1878   __ add(rbcp, rbcp, r2);
1879 
1880   assert(UseLoopCounter || !UseOnStackReplacement,
1881          "on-stack-replacement requires loop counters");
1882   Label backedge_counter_overflow;
1883   Label dispatch;
1884   if (UseLoopCounter) {
1885     // increment backedge counter for backward branches
1886     // r0: MDO
1887     // w1: MDO bumped taken-count
1888     // r2: target offset
1889     __ cmp(r2, zr);
1890     __ br(Assembler::GT, dispatch); // count only if backward branch
1891 
1892     // ECN: FIXME: This code smells
1893     // check if MethodCounters exists
1894     Label has_counters;
1895     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1896     __ cbnz(rscratch1, has_counters);
1897     __ push(r0);
1898     __ push(r1);
1899     __ push(r2);
1900     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
1901             InterpreterRuntime::build_method_counters), rmethod);
1902     __ pop(r2);
1903     __ pop(r1);
1904     __ pop(r0);
1905     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1906     __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
1907     __ bind(has_counters);
1908 
1909     Label no_mdo;
1910     int increment = InvocationCounter::count_increment;
1911     if (ProfileInterpreter) {
1912       // Are we profiling?
1913       __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1914       __ cbz(r1, no_mdo);
1915       // Increment the MDO backedge counter
1916       const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
1917                                          in_bytes(InvocationCounter::counter_offset()));
1918       const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
1919       __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1920                                  r0, rscratch1, false, Assembler::EQ,
1921                                  UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1922       __ b(dispatch);
1923     }
1924     __ bind(no_mdo);
1925     // Increment backedge counter in MethodCounters*
1926     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1927     const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
1928     __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
1929                                r0, rscratch2, false, Assembler::EQ,
1930                                UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1931     __ bind(dispatch);
1932   }
1933 
1934   // Pre-load the next target bytecode into rscratch1
1935   __ load_unsigned_byte(rscratch1, Address(rbcp, 0));
1936 
1937   // continue with the bytecode @ target
1938   // rscratch1: target bytecode
1939   // rbcp: target bcp
1940   __ dispatch_only(vtos, /*generate_poll*/true);
1941 
1942   if (UseLoopCounter && UseOnStackReplacement) {
1943     // invocation counter overflow
1944     __ bind(backedge_counter_overflow);
1945     __ neg(r2, r2);
1946     __ add(r2, r2, rbcp);     // branch bcp
1947     // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1948     __ call_VM(noreg,
1949                CAST_FROM_FN_PTR(address,
1950                                 InterpreterRuntime::frequency_counter_overflow),
1951                r2);
1952     __ load_unsigned_byte(r1, Address(rbcp, 0));  // restore target bytecode
1953 
1954     // r0: osr nmethod (osr ok) or NULL (osr not possible)
1955     // w1: target bytecode
1956     // r2: scratch
1957     __ cbz(r0, dispatch);     // test result -- no osr if null
1958     // nmethod may have been invalidated (VM may block upon call_VM return)
1959     __ ldrb(r2, Address(r0, nmethod::state_offset()));
1960     if (nmethod::in_use != 0)
1961       __ sub(r2, r2, nmethod::in_use);
1962     __ cbnz(r2, dispatch);
1963 
1964     // We have the address of an on stack replacement routine in r0
1965     // We need to prepare to execute the OSR method. First we must
1966     // migrate the locals and monitors off of the stack.
1967 
1968     __ mov(r19, r0);                             // save the nmethod
1969 
1970     call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1971 
1972     // r0 is OSR buffer, move it to expected parameter location
1973     __ mov(j_rarg0, r0);
1974 
1975     // remove activation
1976     // get sender esp
1977     __ ldr(esp,
1978         Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
1979     // remove frame anchor
1980     __ leave();
1981     // Ensure compiled code always sees stack at proper alignment
1982     __ andr(sp, esp, -16);
1983 
1984     // and begin the OSR nmethod
1985     __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
1986     __ br(rscratch1);
1987   }
1988 }
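
// OSR hand-off, in brief (descriptive note, not generated code): once a
// backedge counter overflows and frequency_counter_overflow returns an
// in-use nmethod, the interpreter migrates locals and monitors into an
// OSR buffer, tears down its own frame, realigns sp, and jumps to the
// nmethod's OSR entry point with the buffer address in j_rarg0.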
1989 
1990 
1991 void TemplateTable::if_0cmp(Condition cc)
1992 {
1993   transition(itos, vtos);
1994   // assume branch is more often taken than not (loops use backward branches)
1995   Label not_taken;
1996   if (cc == equal)
1997     __ cbnzw(r0, not_taken);
1998   else if (cc == not_equal)
1999     __ cbzw(r0, not_taken);
2000   else {
2001     __ andsw(zr, r0, r0);
2002     __ br(j_not(cc), not_taken);
2003   }
2004 
2005   branch(false, false);
2006   __ bind(not_taken);
2007   __ profile_not_taken_branch(r0);
2008 }
2009 
2010 void TemplateTable::if_icmp(Condition cc)
2011 {
2012   transition(itos, vtos);
2013   // assume branch is more often taken than not (loops use backward branches)
2014   Label not_taken;
2015   __ pop_i(r1);
2016   __ cmpw(r1, r0, Assembler::LSL);
2017   __ br(j_not(cc), not_taken);
2018   branch(false, false);
2019   __ bind(not_taken);
2020   __ profile_not_taken_branch(r0);
2021 }
2022 
2023 void TemplateTable::if_nullcmp(Condition cc)
2024 {
2025   transition(atos, vtos);
2026   // assume branch is more often taken than not (loops use backward branches)
2027   Label not_taken;
2028   if (cc == equal)
2029     __ cbnz(r0, not_taken);
2030   else
2031     __ cbz(r0, not_taken);
2032   branch(false, false);
2033   __ bind(not_taken);
2034   __ profile_not_taken_branch(r0);
2035 }
2036 
2037 void TemplateTable::if_acmp(Condition cc) {
2038   transition(atos, vtos);
2039   // assume branch is more often taken than not (loops use backward branches)
2040   Label taken, not_taken;
2041   __ pop_ptr(r1);
2042 
2043   __ profile_acmp(r2, r1, r0, r4);
2044 
2045   Register is_inline_type_mask = rscratch1;
2046   __ mov(is_inline_type_mask, markWord::inline_type_pattern);
2047 
2048   if (EnableValhalla) {
2049     __ cmp(r1, r0);
2050     __ br(Assembler::EQ, (cc == equal) ? taken : not_taken);
2051 
2052     // might be substitutable, test if either r0 or r1 is null
2053     __ andr(r2, r0, r1);
2054     __ cbz(r2, (cc == equal) ? not_taken : taken);
2055 
2056     // and are both inline types (values)?
2057     __ ldr(r2, Address(r1, oopDesc::mark_offset_in_bytes()));
2058     __ andr(r2, r2, is_inline_type_mask);
2059     __ ldr(r4, Address(r0, oopDesc::mark_offset_in_bytes()));
2060     __ andr(r4, r4, is_inline_type_mask);
2061     __ andr(r2, r2, r4);
2062     __ cmp(r2,  is_inline_type_mask);
2063     __ br(Assembler::NE, (cc == equal) ? not_taken : taken);
2064 
2065     // same value klass ?
2066     __ load_metadata(r2, r1);
2067     __ load_metadata(r4, r0);
2068     __ cmp(r2, r4);
2069     __ br(Assembler::NE, (cc == equal) ? not_taken : taken);
2070 
2071     // Know both are the same type, let's test for substitutability...
2072     if (cc == equal) {
2073       invoke_is_substitutable(r0, r1, taken, not_taken);
2074     } else {
2075       invoke_is_substitutable(r0, r1, not_taken, taken);
2076     }
2077     __ stop("Not reachable");
2078   }
2079 
2080   __ cmpoop(r1, r0);
2081   __ br(j_not(cc), not_taken);
2082   __ bind(taken);
2083   branch(false, false);
2084   __ bind(not_taken);
2085   __ profile_not_taken_branch(r0, true);
2086 }
2087 
2088 void TemplateTable::invoke_is_substitutable(Register aobj, Register bobj,
2089                                             Label& is_subst, Label& not_subst) {
2090 
2091   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::is_substitutable), aobj, bobj);
2092   // r0 holds the answer: zero means not substitutable; jump to the outcome
2093   __ cbz(r0, not_subst);
2094   __ b(is_subst);
2095 }
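
// Sketch of the Valhalla acmp semantics implemented above (illustrative
// only; the helper names are descriptive, not real APIs):
//
//   static bool acmp_eq(oop a, oop b) {
//     if (a == b) return true;                  // same reference
//     if (a == nullptr || b == nullptr) return false;
//     if (!is_inline_type(a) || !is_inline_type(b)) return false;
//     if (klass_of(a) != klass_of(b)) return false;
//     return is_substitutable(a, b);            // recursive field compare
//   }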
2096 
2097 
2098 void TemplateTable::ret() {
2099   transition(vtos, vtos);
2100   // We might be moving to a safepoint.  The thread which calls
2101   // Interpreter::notice_safepoints() will effectively flush its cache
2102   // when it makes a system call, but we need to do something to
2103   // ensure that we see the changed dispatch table.
2104   __ membar(MacroAssembler::LoadLoad);
2105 
2106   locals_index(r1);
2107   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
2108   __ profile_ret(r1, r2);
2109   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
2110   __ lea(rbcp, Address(rbcp, r1));
2111   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
2112   __ dispatch_next(vtos, 0, /*generate_poll*/true);
2113 }
2114 
2115 void TemplateTable::wide_ret() {
2116   transition(vtos, vtos);
2117   locals_index_wide(r1);
2118   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
2119   __ profile_ret(r1, r2);
2120   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
2121   __ lea(rbcp, Address(rbcp, r1));
2122   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
2123   __ dispatch_next(vtos, 0, /*generate_poll*/true);
2124 }
2125 
2126 
2127 void TemplateTable::tableswitch() {
2128   Label default_case, continue_execution;
2129   transition(itos, vtos);
2130   // align rbcp
2131   __ lea(r1, at_bcp(BytesPerInt));
2132   __ andr(r1, r1, -BytesPerInt);
2133   // load lo & hi
2134   __ ldrw(r2, Address(r1, BytesPerInt));
2135   __ ldrw(r3, Address(r1, 2 * BytesPerInt));
2136   __ rev32(r2, r2);
2137   __ rev32(r3, r3);
2138   // check against lo & hi
2139   __ cmpw(r0, r2);
2140   __ br(Assembler::LT, default_case);
2141   __ cmpw(r0, r3);
2142   __ br(Assembler::GT, default_case);
2143   // lookup dispatch offset
2144   __ subw(r0, r0, r2);
2145   __ lea(r3, Address(r1, r0, Address::uxtw(2)));
2146   __ ldrw(r3, Address(r3, 3 * BytesPerInt));
2147   __ profile_switch_case(r0, r1, r2);
2148   // continue execution
2149   __ bind(continue_execution);
2150   __ rev32(r3, r3);
2151   __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
2152   __ add(rbcp, rbcp, r3, ext::sxtw);
2153   __ dispatch_only(vtos, /*generate_poll*/true);
2154   // handle default
2155   __ bind(default_case);
2156   __ profile_switch_default(r0);
2157   __ ldrw(r3, Address(r1, 0));
2158   __ b(continue_execution);
2159 }
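
// Operand layout decoded above (illustrative): after rounding bcp + 1
// up to a 4-byte boundary, the stream holds, all big-endian,
//
//   default(4) lo(4) hi(4) offset[0 .. hi-lo](4 each)
//
// hence the rev32s and the 3 * BytesPerInt base of the offset table.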
2160 
2161 void TemplateTable::lookupswitch() {
2162   transition(itos, itos);
2163   __ stop("lookupswitch bytecode should have been rewritten");
2164 }
2165 
2166 void TemplateTable::fast_linearswitch() {
2167   transition(itos, vtos);
2168   Label loop_entry, loop, found, continue_execution;
2169   // bswap r0 so we can avoid bswapping the table entries
2170   __ rev32(r0, r0);
2171   // align rbcp
2172   __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2173                                     // this instruction (change offsets
2174                                     // below)
2175   __ andr(r19, r19, -BytesPerInt);
2176   // set counter
2177   __ ldrw(r1, Address(r19, BytesPerInt));
2178   __ rev32(r1, r1);
2179   __ b(loop_entry);
2180   // table search
2181   __ bind(loop);
2182   __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2183   __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
2184   __ cmpw(r0, rscratch1);
2185   __ br(Assembler::EQ, found);
2186   __ bind(loop_entry);
2187   __ subs(r1, r1, 1);
2188   __ br(Assembler::PL, loop);
2189   // default case
2190   __ profile_switch_default(r0);
2191   __ ldrw(r3, Address(r19, 0));
2192   __ b(continue_execution);
2193   // entry found -> get offset
2194   __ bind(found);
2195   __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2196   __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
2197   __ profile_switch_case(r1, r0, r19);
2198   // continue execution
2199   __ bind(continue_execution);
2200   __ rev32(r3, r3);
2201   __ add(rbcp, rbcp, r3, ext::sxtw);
2202   __ ldrb(rscratch1, Address(rbcp, 0));
2203   __ dispatch_only(vtos, /*generate_poll*/true);
2204 }
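
// fast_linearswitch scans lookupswitch operands laid out as
// (illustrative, all big-endian):
//
//   default(4) npairs(4) { match(4) offset(4) } x npairs
//
// The key in r0 is byte-reversed once instead of reversing every match
// value, and the scan runs backwards from npairs - 1 down to 0.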
2205 
2206 void TemplateTable::fast_binaryswitch() {
2207   transition(itos, vtos);
2208   // Implementation using the following core algorithm:
2209   //
2210   // int binary_search(int key, LookupswitchPair* array, int n) {
2211   //   // Binary search according to "Methodik des Programmierens" by
2212   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2213   //   int i = 0;
2214   //   int j = n;
2215   //   while (i+1 < j) {
2216   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2217   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2218   //     // where a stands for the array and assuming that the (nonexistent)
2219   //     // element a[n] is infinitely big.
2220   //     int h = (i + j) >> 1;
2221   //     // i < h < j
2222   //     if (key < array[h].fast_match()) {
2223   //       j = h;
2224   //     } else {
2225   //       i = h;
2226   //     }
2227   //   }
2228   //   // R: a[i] <= key < a[i+1] or Q
2229   //   // (i.e., if key is within array, i is the correct index)
2230   //   return i;
2231   // }
2232 
2233   // Register allocation
2234   const Register key   = r0; // already set (tosca)
2235   const Register array = r1;
2236   const Register i     = r2;
2237   const Register j     = r3;
2238   const Register h     = rscratch1;
2239   const Register temp  = rscratch2;
2240 
2241   // Find array start
2242   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2243                                           // get rid of this
2244                                           // instruction (change
2245                                           // offsets below)
2246   __ andr(array, array, -BytesPerInt);
2247 
2248   // Initialize i & j
2249   __ mov(i, 0);                            // i = 0;
2250   __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);
2251 
2252   // Convert j into native byteordering
2253   __ rev32(j, j);
2254 
2255   // And start
2256   Label entry;
2257   __ b(entry);
2258 
2259   // binary search loop
2260   {
2261     Label loop;
2262     __ bind(loop);
2263     // int h = (i + j) >> 1;
2264     __ addw(h, i, j);                           // h = i + j;
2265     __ lsrw(h, h, 1);                           // h = (i + j) >> 1;
2266     // if (key < array[h].fast_match()) {
2267     //   j = h;
2268     // } else {
2269     //   i = h;
2270     // }
2271     // Convert array[h].match to native byte-ordering before compare
2272     __ ldr(temp, Address(array, h, Address::lsl(3)));
2273     __ rev32(temp, temp);
2274     __ cmpw(key, temp);
2275     // j = h if (key <  array[h].fast_match())
2276     __ csel(j, h, j, Assembler::LT);
2277     // i = h if (key >= array[h].fast_match())
2278     __ csel(i, h, i, Assembler::GE);
2279     // while (i+1 < j)
2280     __ bind(entry);
2281     __ addw(h, i, 1);          // i+1
2282     __ cmpw(h, j);             // i+1 < j
2283     __ br(Assembler::LT, loop);
2284   }
2285 
2286   // end of binary search, result index is i (must check again!)
2287   Label default_case;
2288   // Convert array[i].match to native byte-ordering before compare
2289   __ ldr(temp, Address(array, i, Address::lsl(3)));
2290   __ rev32(temp, temp);
2291   __ cmpw(key, temp);
2292   __ br(Assembler::NE, default_case);
2293 
2294   // entry found -> j = offset
2295   __ add(j, array, i, ext::uxtx, 3);
2296   __ ldrw(j, Address(j, BytesPerInt));
2297   __ profile_switch_case(i, key, array);
2298   __ rev32(j, j);
2299   __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2300   __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2301   __ dispatch_only(vtos, /*generate_poll*/true);
2302 
2303   // default case -> j = default offset
2304   __ bind(default_case);
2305   __ profile_switch_default(i);
2306   __ ldrw(j, Address(array, -2 * BytesPerInt));
2307   __ rev32(j, j);
2308   __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2309   __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2310   __ dispatch_only(vtos, /*generate_poll*/true);
2311 }
2312 
2313 
2314 void TemplateTable::_return(TosState state)
2315 {
2316   transition(state, state);
2317   assert(_desc->calls_vm(),
2318          "inconsistent calls_vm information"); // call in remove_activation
2319 
2320   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2321     assert(state == vtos, "only valid state");
2322 
2323     __ ldr(c_rarg1, aaddress(0));
2324     __ load_klass(r3, c_rarg1);
2325     __ ldrw(r3, Address(r3, Klass::access_flags_offset()));
2326     Label skip_register_finalizer;
2327     __ tbz(r3, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2328 
2329     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2330 
2331     __ bind(skip_register_finalizer);
2332   }
2333 
2334   // Issue a StoreStore barrier after all stores but before return
2335   // from any constructor for any class with a final field.  We don't
2336   // know if this is such a constructor, so we always do so.
2337   if (_desc->bytecode() == Bytecodes::_return)
2338     __ membar(MacroAssembler::StoreStore);
2339 
2340   // Narrow result if state is itos but result type is smaller.
2341   // Need to narrow in the return bytecode rather than in generate_return_entry
2342   // since compiled code callers expect the result to already be narrowed.
2343   if (state == itos) {
2344     __ narrow(r0);
2345   }
2346 
2347   __ remove_activation(state);
2348   __ ret(lr);
2349 }
2350 
2351 // ----------------------------------------------------------------------------
2352 // Volatile variables demand their effects be made known to all CPUs
2353 // in order.  Store buffers on most chips allow reads & writes to
2354 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2355 // without some kind of memory barrier (i.e., it's not sufficient that
2356 // the interpreter does not reorder volatile references, the hardware
2357 // also must not reorder them).
2358 //
2359 // According to the new Java Memory Model (JMM):
2360 // (1) All volatiles are serialized with respect to each other.  ALSO reads &
2361 //     writes act as acquire & release, so:
2362 // (2) A read cannot let unrelated NON-volatile memory refs that
2363 //     happen after the read float up to before the read.  It's OK for
2364 //     non-volatile memory refs that happen before the volatile read to
2365 //     float down below it.
2366 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2367 //     memory refs that happen BEFORE the write float down to after the
2368 //     write.  It's OK for non-volatile memory refs that happen after the
2369 //     volatile write to float up before it.
2370 //
2371 // We only put in barriers around volatile refs (they are expensive),
2372 // not _between_ memory refs (that would require us to track the
2373 // flavor of the previous memory refs).  Requirements (2) and (3)
2374 // require some barriers before volatile stores and after volatile
2375 // loads.  These nearly cover requirement (1) but miss the
2376 // volatile-store-volatile-load case.  This final case is placed after
2377 // volatile-stores although it could just as well go before
2378 // volatile-loads.
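//
// Concretely, the interpreter code below emits this pattern around
// volatile accesses (a summary, not extra generated code):
//
//   volatile load:   membar(AnyAny); load; membar(LoadLoad|LoadStore)
//   volatile store:  membar(StoreStore|LoadStore); store;
//                    membar(StoreLoad|StoreStore)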
2379 
2380 void TemplateTable::resolve_cache_and_index(int byte_no,
2381                                             Register Rcache,
2382                                             Register index,
2383                                             size_t index_size) {
2384   const Register temp = r19;
2385   assert_different_registers(Rcache, index, temp);
2386 
2387   Label resolved, clinit_barrier_slow;
2388 
2389   Bytecodes::Code code = bytecode();
2390   switch (code) {
2391   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2392   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2393   default: break;
2394   }
2395 
2396   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2397   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2398   __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
2399   __ br(Assembler::EQ, resolved);
2400 
2401   // resolve first time through
2402   // Class initialization barrier slow path lands here as well.
2403   __ bind(clinit_barrier_slow);
2404   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2405   __ mov(temp, (int) code);
2406   __ call_VM(noreg, entry, temp);
2407 
2408   // Update registers with resolved info
2409   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2410   // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
2411   // so all clients of this method must be modified accordingly
2412   __ bind(resolved);
2413 
2414   // Class initialization barrier for static methods
2415   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2416     __ load_resolved_method_at_index(byte_no, temp, Rcache);
2417     __ load_method_holder(temp, temp);
2418     __ clinit_barrier(temp, rscratch1, NULL, &clinit_barrier_slow);
2419   }
2420 }
2421 
2422 // The Rcache and index registers must be set before call
2423 // n.b. unlike x86 the cache register already includes the index offset
2424 void TemplateTable::load_field_cp_cache_entry(Register obj,
2425                                               Register cache,
2426                                               Register index,
2427                                               Register off,
2428                                               Register flags,
2429                                               bool is_static = false) {
2430   assert_different_registers(cache, index, flags, off);
2431 
2432   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2433   // Field offset
2434   __ ldr(off, Address(cache, in_bytes(cp_base_offset +
2435                                       ConstantPoolCacheEntry::f2_offset())));
2436   // Flags
2437   __ ldrw(flags, Address(cache, in_bytes(cp_base_offset +
2438                                          ConstantPoolCacheEntry::flags_offset())));
2439 
2440   // klass overwrite register
2441   if (is_static) {
2442     __ ldr(obj, Address(cache, in_bytes(cp_base_offset +
2443                                         ConstantPoolCacheEntry::f1_offset())));
2444     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2445     __ ldr(obj, Address(obj, mirror_offset));
2446     __ resolve_oop_handle(obj, r5, rscratch2);
2447   }
2448 }
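
// For orientation (descriptive note; see cpCache.hpp for the real
// definition), each ConstantPoolCacheEntry accessed above holds roughly:
//
//   _indices - constant pool index and (re)written bytecodes
//   _f1      - Method* or Klass* (e.g. the field holder klass)
//   _f2      - field offset, or vtable/itable index, or Method*
//   _flags   - tos state, volatile/final/inlined bits, field index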
2449 
2450 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2451                                                Register method,
2452                                                Register itable_index,
2453                                                Register flags,
2454                                                bool is_invokevirtual,
2455                                                bool is_invokevfinal, /*unused*/
2456                                                bool is_invokedynamic) {
2457   // setup registers
2458   const Register cache = rscratch2;
2459   const Register index = r4;
2460   assert_different_registers(method, flags);
2461   assert_different_registers(method, cache, index);
2462   assert_different_registers(itable_index, flags);
2463   assert_different_registers(itable_index, cache, index);
2464   // determine constant pool cache field offsets
2465   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2466   const int method_offset = in_bytes(
2467     ConstantPoolCache::base_offset() +
2468       (is_invokevirtual
2469        ? ConstantPoolCacheEntry::f2_offset()
2470        : ConstantPoolCacheEntry::f1_offset()));
2471   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2472                                     ConstantPoolCacheEntry::flags_offset());
2473   // access constant pool cache fields
2474   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2475                                     ConstantPoolCacheEntry::f2_offset());
2476 
2477   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2478   resolve_cache_and_index(byte_no, cache, index, index_size);
2479   __ ldr(method, Address(cache, method_offset));
2480 
2481   if (itable_index != noreg) {
2482     __ ldr(itable_index, Address(cache, index_offset));
2483   }
2484   __ ldrw(flags, Address(cache, flags_offset));
2485 }
2486 
2487 
2488 // The registers cache and index are expected to be set before the call.
2489 // Correct values of the cache and index registers are preserved.
2490 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2491                                             bool is_static, bool has_tos) {
2492   // do the JVMTI work here to avoid disturbing the register state below
2493   // We use c_rarg registers here because they match the registers used
2494   // in the call to the VM
2495   if (JvmtiExport::can_post_field_access()) {
2496     // Check to see if a field access watch has been set before we
2497     // take the time to call into the VM.
2498     Label L1;
2499     assert_different_registers(cache, index, r0);
2500     __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2501     __ ldrw(r0, Address(rscratch1));
2502     __ cbzw(r0, L1);
2503 
2504     __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2505     __ lea(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset())));
2506 
2507     if (is_static) {
2508       __ mov(c_rarg1, zr); // NULL object reference
2509     } else {
2510       __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
2511       __ verify_oop(c_rarg1);
2512     }
2513     // c_rarg1: object pointer or NULL
2514     // c_rarg2: cache entry pointer
2515     // c_rarg3: jvalue object on the stack
2516     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2517                                        InterpreterRuntime::post_field_access),
2518                c_rarg1, c_rarg2, c_rarg3);
2519     __ get_cache_and_index_at_bcp(cache, index, 1);
2520     __ bind(L1);
2521   }
2522 }
2523 
2524 void TemplateTable::pop_and_check_object(Register r)
2525 {
2526   __ pop_ptr(r);
2527   __ null_check(r);  // for field access must check obj.
2528   __ verify_oop(r);
2529 }
2530 
2531 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2532 {
2533   const Register cache = r2;
2534   const Register index = r3;
2535   const Register obj   = r4;
2536   const Register klass = r5;
2537   const Register inline_klass = r7;
2538   const Register off   = r19;
2539   const Register flags = r0;
2540   const Register raw_flags = r6;
2541   const Register bc    = r4; // uses same reg as obj, so don't mix them
2542 
2543   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2544   jvmti_post_field_access(cache, index, is_static, false);
2545   load_field_cp_cache_entry(obj, cache, index, off, raw_flags, is_static);
2546 
2547   if (!is_static) {
2548     // obj is on the stack
2549     pop_and_check_object(obj);
2550   }
2551 
2552   // 8179954: We need to make sure that the code generated for
2553   // volatile accesses forms a sequentially-consistent set of
2554   // operations when combined with STLR and LDAR.  Without a leading
2555   // membar it's possible for a simple Dekker test to fail if loads
2556   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
2557   // the stores in one method and we interpret the loads in another.
2558   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
2559     Label notVolatile;
2560     __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2561     __ membar(MacroAssembler::AnyAny);
2562     __ bind(notVolatile);
2563   }
2564 
2565   const Address field(obj, off);
2566 
2567   Label Done, notByte, notBool, notInt, notShort, notChar,
2568               notLong, notFloat, notObj, notDouble;
2569 
2570   if (!is_static) {
2571     __ ldr(klass, Address(cache, in_bytes(ConstantPoolCache::base_offset() +
2572                                           ConstantPoolCacheEntry::f1_offset())));
2573   }
2574 
2575   // x86 uses a shift and mask, or wings it with a shift plus an
2576   // assert that the mask is not needed; aarch64 just uses a bitfield extract
2577   __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);
2578 
2579   assert(btos == 0, "change code, btos != 0");
2580   __ cbnz(flags, notByte);
2581 
2582   // Don't rewrite getstatic, only getfield
2583   if (is_static) rc = may_not_rewrite;
2584 
2585   // btos
2586   __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
2587   __ push(btos);
2588   // Rewrite bytecode to be faster
2589   if (rc == may_rewrite) {
2590     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2591   }
2592   __ b(Done);
2593 
2594   __ bind(notByte);
2595   __ cmp(flags, (u1)ztos);
2596   __ br(Assembler::NE, notBool);
2597 
2598   // ztos (same code as btos)
2599   __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
2600   __ push(ztos);
2601   // Rewrite bytecode to be faster
2602   if (rc == may_rewrite) {
2603     // use btos rewriting, no truncating to t/f bit is needed for getfield.
2604     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2605   }
2606   __ b(Done);
2607 
2608   __ bind(notBool);
2609   __ cmp(flags, (u1)atos);
2610   __ br(Assembler::NE, notObj);
2611   // atos
2612   if (!EnablePrimitiveClasses) {
2613     do_oop_load(_masm, field, r0, IN_HEAP);
2614     __ push(atos);
2615     if (rc == may_rewrite) {
2616       patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2617     }
2618     __ b(Done);
2619   } else { // Valhalla
2620     if (is_static) {
2621       __ load_heap_oop(r0, field, rscratch1, rscratch2);
2622       Label is_null_free_inline_type, uninitialized;
2623       // Handled below: the static field may not have been initialized yet
2624       __ test_field_is_null_free_inline_type(raw_flags, noreg /*temp*/, is_null_free_inline_type);
2625         // field is not a null free inline type
2626         __ push(atos);
2627         __ b(Done);
2628       // field is a null free inline type, must not return null even if uninitialized
2629       __ bind(is_null_free_inline_type);
2630         __ cbz(r0, uninitialized);
2631           __ push(atos);
2632           __ b(Done);
2633         __ bind(uninitialized);
2634           __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
2635           Label slow_case, finish;
2636           __ ldrb(rscratch1, Address(cache, InstanceKlass::init_state_offset()));
2637           __ cmp(rscratch1, (u1)InstanceKlass::fully_initialized);
2638           __ br(Assembler::NE, slow_case);
2639           __ get_default_value_oop(klass, off /* temp */, r0);
2640         __ b(finish);
2641         __ bind(slow_case);
2642           __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_inline_type_field), obj, raw_flags);
2643           __ bind(finish);
2644           __ verify_oop(r0);
2645           __ push(atos);
2646           __ b(Done);
2647     } else {
2648       Label is_inlined, nonnull, is_inline_type, rewrite_inline;
2649       __ test_field_is_null_free_inline_type(raw_flags, noreg /*temp*/, is_inline_type);
2650         // Non-inline field case
2651         __ load_heap_oop(r0, field, rscratch1, rscratch2);
2652         __ push(atos);
2653         if (rc == may_rewrite) {
2654           patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2655         }
2656         __ b(Done);
2657       __ bind(is_inline_type);
2658         __ test_field_is_inlined(raw_flags, noreg /* temp */, is_inlined);
2659          // field is not inlined
2660           __ load_heap_oop(r0, field, rscratch1, rscratch2);
2661           __ cbnz(r0, nonnull);
2662             __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
2663             __ get_inline_type_field_klass(klass, raw_flags, inline_klass);
2664             __ get_default_value_oop(inline_klass, klass /* temp */, r0);
2665           __ bind(nonnull);
2666           __ verify_oop(r0);
2667           __ push(atos);
2668           __ b(rewrite_inline);
2669         __ bind(is_inlined);
2670         // field is inlined
2671           __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
2672           __ mov(r0, obj);
2673           __ read_inlined_field(klass, raw_flags, off, inline_klass /* temp */, r0);
2674           __ verify_oop(r0);
2675           __ push(atos);
2676       __ bind(rewrite_inline);
2677       if (rc == may_rewrite) {
2678         patch_bytecode(Bytecodes::_fast_qgetfield, bc, r1);
2679       }
2680       __ b(Done);
2681     }
2682   }
2683 
2684   __ bind(notObj);
2685   __ cmp(flags, (u1)itos);
2686   __ br(Assembler::NE, notInt);
2687   // itos
2688   __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
2689   __ push(itos);
2690   // Rewrite bytecode to be faster
2691   if (rc == may_rewrite) {
2692     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2693   }
2694   __ b(Done);
2695 
2696   __ bind(notInt);
2697   __ cmp(flags, (u1)ctos);
2698   __ br(Assembler::NE, notChar);
2699   // ctos
2700   __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
2701   __ push(ctos);
2702   // Rewrite bytecode to be faster
2703   if (rc == may_rewrite) {
2704     patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
2705   }
2706   __ b(Done);
2707 
2708   __ bind(notChar);
2709   __ cmp(flags, (u1)stos);
2710   __ br(Assembler::NE, notShort);
2711   // stos
2712   __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
2713   __ push(stos);
2714   // Rewrite bytecode to be faster
2715   if (rc == may_rewrite) {
2716     patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
2717   }
2718   __ b(Done);
2719 
2720   __ bind(notShort);
2721   __ cmp(flags, (u1)ltos);
2722   __ br(Assembler::NE, notLong);
2723   // ltos
2724   __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
2725   __ push(ltos);
2726   // Rewrite bytecode to be faster
2727   if (rc == may_rewrite) {
2728     patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
2729   }
2730   __ b(Done);
2731 
2732   __ bind(notLong);
2733   __ cmp(flags, (u1)ftos);
2734   __ br(Assembler::NE, notFloat);
2735   // ftos
2736   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2737   __ push(ftos);
2738   // Rewrite bytecode to be faster
2739   if (rc == may_rewrite) {
2740     patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
2741   }
2742   __ b(Done);
2743 
2744   __ bind(notFloat);
2745 #ifdef ASSERT
2746   __ cmp(flags, (u1)dtos);
2747   __ br(Assembler::NE, notDouble);
2748 #endif
2749   // dtos
2750   __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2751   __ push(dtos);
2752   // Rewrite bytecode to be faster
2753   if (rc == may_rewrite) {
2754     patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
2755   }
2756 #ifdef ASSERT
2757   __ b(Done);
2758 
2759   __ bind(notDouble);
2760   __ stop("Bad state");
2761 #endif
2762 
2763   __ bind(Done);
2764 
2765   Label notVolatile;
2766   __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2767   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2768   __ bind(notVolatile);
2769 }
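
// The compare chain above is effectively a switch on the tos state
// extracted from the cache entry flags (a sketch, not real code):
//
//   switch ((raw_flags >> tos_state_shift) & ((1 << tos_state_bits) - 1)) {
//     case btos: /* T_BYTE load, maybe rewrite to _fast_bgetfield */ break;
//     case atos: /* oop or inline type load */                      break;
//     // ... ztos, itos, ctos, stos, ltos, ftos, dtos likewise ...
//     default:   ShouldNotReachHere();
//   }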
2770 
2771 
2772 void TemplateTable::getfield(int byte_no)
2773 {
2774   getfield_or_static(byte_no, false);
2775 }
2776 
2777 void TemplateTable::nofast_getfield(int byte_no) {
2778   getfield_or_static(byte_no, false, may_not_rewrite);
2779 }
2780 
2781 void TemplateTable::getstatic(int byte_no)
2782 {
2783   getfield_or_static(byte_no, true);
2784 }
2785 
2786 // The registers cache and index are expected to be set before the call.
2787 // The function may destroy various registers, just not the cache and index registers.
2788 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2789   transition(vtos, vtos);
2790 
2791   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2792 
2793   if (JvmtiExport::can_post_field_modification()) {
2794     // Check to see if a field modification watch has been set before
2795     // we take the time to call into the VM.
2796     Label L1;
2797     assert_different_registers(cache, index, r0);
2798     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2799     __ ldrw(r0, Address(rscratch1));
2800     __ cbz(r0, L1);
2801 
2802     __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2803 
2804     if (is_static) {
2805       // Life is simple.  Null out the object pointer.
2806       __ mov(c_rarg1, zr);
2807     } else {
2808       // Life is harder. The stack holds the value on top, followed by
2809       // the object.  We don't know the size of the value, though; it
2810       // could be one or two words depending on its type. As a result,
2811       // we must find the type to determine where the object is.
2812       __ ldrw(c_rarg3, Address(c_rarg2,
2813                                in_bytes(cp_base_offset +
2814                                         ConstantPoolCacheEntry::flags_offset())));
2815       __ lsr(c_rarg3, c_rarg3,
2816              ConstantPoolCacheEntry::tos_state_shift);
2817       ConstantPoolCacheEntry::verify_tos_state_shift();
2818       Label nope2, ok;
2819       __ ldr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
2820       __ cmpw(c_rarg3, ltos);
2821       __ br(Assembler::EQ, ok);
2822       __ cmpw(c_rarg3, dtos);
2823       __ br(Assembler::NE, nope2);
2824       __ bind(ok);
2825       __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2826       __ bind(nope2);
2827     }
2828     // cache entry pointer
2829     __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
2830     // object (tos)
2831     __ mov(c_rarg3, esp);
2832     // c_rarg1: object pointer set up above (NULL if static)
2833     // c_rarg2: cache entry pointer
2834     // c_rarg3: jvalue object on the stack
2835     __ call_VM(noreg,
2836                CAST_FROM_FN_PTR(address,
2837                                 InterpreterRuntime::post_field_modification),
2838                c_rarg1, c_rarg2, c_rarg3);
2839     __ get_cache_and_index_at_bcp(cache, index, 1);
2840     __ bind(L1);
2841   }
2842 }
2843 
2844 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2845   transition(vtos, vtos);
2846 
2847   const Register cache = r2;
2848   const Register index = r3;
2849   const Register obj   = r2;
2850   const Register off   = r19;
2851   const Register flags = r0;
2852   const Register flags2 = r6;
2853   const Register bc    = r4;
2854   const Register inline_klass = r5;
2855 
2856   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2857   jvmti_post_field_mod(cache, index, is_static);
2858   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2859 
2860   Label Done;
2861   __ mov(r5, flags);
2862 
2863   {
2864     Label notVolatile;
2865     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2866     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2867     __ bind(notVolatile);
2868   }
2869 
2870   // field address
2871   const Address field(obj, off);
2872 
2873   Label notByte, notBool, notInt, notShort, notChar,
2874         notLong, notFloat, notObj, notDouble;
2875 
2876   __ mov(flags2, flags);
2877 
2878   // x86 uses a shift and mask, or wings it with a shift plus an
2879   // assert that the mask is not needed; aarch64 just uses a bitfield extract
2880   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2881 
2882   assert(btos == 0, "change code, btos != 0");
2883   __ cbnz(flags, notByte);
2884 
2885   // Don't rewrite putstatic, only putfield
2886   if (is_static) rc = may_not_rewrite;
2887 
2888   // btos
2889   {
2890     __ pop(btos);
2891     if (!is_static) pop_and_check_object(obj);
2892     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
2893     if (rc == may_rewrite) {
2894       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2895     }
2896     __ b(Done);
2897   }
2898 
2899   __ bind(notByte);
2900   __ cmp(flags, (u1)ztos);
2901   __ br(Assembler::NE, notBool);
2902 
2903   // ztos
2904   {
2905     __ pop(ztos);
2906     if (!is_static) pop_and_check_object(obj);
2907     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
2908     if (rc == may_rewrite) {
2909       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2910     }
2911     __ b(Done);
2912   }
2913 
2914   __ bind(notBool);
2915   __ cmp(flags, (u1)atos);
2916   __ br(Assembler::NE, notObj);
2917 
2918   // atos
2919   {
2920      if (!EnablePrimitiveClasses) {
2921       __ pop(atos);
2922       if (!is_static) pop_and_check_object(obj);
2923       // Store into the field
2924       do_oop_store(_masm, field, r0, IN_HEAP);
2925       if (rc == may_rewrite) {
2926         patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2927       }
2928       __ b(Done);
2929      } else { // Valhalla
2930       __ pop(atos);
2931       if (is_static) {
2932         Label is_inline_type;
2933          __ test_field_is_not_null_free_inline_type(flags2, noreg /* temp */, is_inline_type);
2934          __ null_check(r0);
2935          __ bind(is_inline_type);
2936          do_oop_store(_masm, field, r0, IN_HEAP);
2937          __ b(Done);
2938       } else {
2939         Label is_inline_type, is_inlined, rewrite_not_inline, rewrite_inline;
2940         __ test_field_is_null_free_inline_type(flags2, noreg /*temp*/, is_inline_type);
2941         // Not an inline type
2942         pop_and_check_object(obj);
2943         // Store into the field
2944         do_oop_store(_masm, field, r0, IN_HEAP);
2945         __ bind(rewrite_not_inline);
2946         if (rc == may_rewrite) {
2947           patch_bytecode(Bytecodes::_fast_aputfield, bc, r19, true, byte_no);
2948         }
2949         __ b(Done);
2950         // Implementation of the inline type semantic
2951         __ bind(is_inline_type);
2952         __ null_check(r0);
2953         __ test_field_is_inlined(flags2, noreg /*temp*/, is_inlined);
2954         // field is not inlined
2955         pop_and_check_object(obj);
2956         // Store into the field
2957         do_oop_store(_masm, field, r0, IN_HEAP);
2958         __ b(rewrite_inline);
2959         __ bind(is_inlined);
2960         // field is inlined
2961         pop_and_check_object(obj);
2962         assert_different_registers(r0, inline_klass, obj, off);
2963         __ load_klass(inline_klass, r0);
2964         __ data_for_oop(r0, r0, inline_klass);
2965         __ add(obj, obj, off);
2966         __ access_value_copy(IN_HEAP, r0, obj, inline_klass);
2967         __ bind(rewrite_inline);
2968         if (rc == may_rewrite) {
2969           patch_bytecode(Bytecodes::_fast_qputfield, bc, r19, true, byte_no);
2970         }
2971         __ b(Done);
2972       }
2973      }  // Valhalla
2974   }
2975 
2976   __ bind(notObj);
2977   __ cmp(flags, (u1)itos);
2978   __ br(Assembler::NE, notInt);
2979 
2980   // itos
2981   {
2982     __ pop(itos);
2983     if (!is_static) pop_and_check_object(obj);
2984     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
2985     if (rc == may_rewrite) {
2986       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2987     }
2988     __ b(Done);
2989   }
2990 
2991   __ bind(notInt);
2992   __ cmp(flags, (u1)ctos);
2993   __ br(Assembler::NE, notChar);
2994 
2995   // ctos
2996   {
2997     __ pop(ctos);
2998     if (!is_static) pop_and_check_object(obj);
2999     __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
3000     if (rc == may_rewrite) {
3001       patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
3002     }
3003     __ b(Done);
3004   }
3005 
3006   __ bind(notChar);
3007   __ cmp(flags, (u1)stos);
3008   __ br(Assembler::NE, notShort);
3009 
3010   // stos
3011   {
3012     __ pop(stos);
3013     if (!is_static) pop_and_check_object(obj);
3014     __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
3015     if (rc == may_rewrite) {
3016       patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
3017     }
3018     __ b(Done);
3019   }
3020 
3021   __ bind(notShort);
3022   __ cmp(flags, (u1)ltos);
3023   __ br(Assembler::NE, notLong);
3024 
3025   // ltos
3026   {
3027     __ pop(ltos);
3028     if (!is_static) pop_and_check_object(obj);
3029     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
3030     if (rc == may_rewrite) {
3031       patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
3032     }
3033     __ b(Done);
3034   }
3035 
3036   __ bind(notLong);
3037   __ cmp(flags, (u1)ftos);
3038   __ br(Assembler::NE, notFloat);
3039 
3040   // ftos
3041   {
3042     __ pop(ftos);
3043     if (!is_static) pop_and_check_object(obj);
3044     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3045     if (rc == may_rewrite) {
3046       patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
3047     }
3048     __ b(Done);
3049   }
3050 
3051   __ bind(notFloat);
3052 #ifdef ASSERT
3053   __ cmp(flags, (u1)dtos);
3054   __ br(Assembler::NE, notDouble);
3055 #endif
3056 
3057   // dtos
3058   {
3059     __ pop(dtos);
3060     if (!is_static) pop_and_check_object(obj);
3061     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
3062     if (rc == may_rewrite) {
3063       patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
3064     }
3065   }
3066 
3067 #ifdef ASSERT
3068   __ b(Done);
3069 
3070   __ bind(notDouble);
3071   __ stop("Bad state");
3072 #endif
3073 
3074   __ bind(Done);
3075 
3076   {
3077     Label notVolatile;
3078     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3079     __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
3080     __ bind(notVolatile);
3081   }
3082 }
3083 
3084 void TemplateTable::putfield(int byte_no)
3085 {
3086   putfield_or_static(byte_no, false);
3087 }
3088 
3089 void TemplateTable::nofast_putfield(int byte_no) {
3090   putfield_or_static(byte_no, false, may_not_rewrite);
3091 }
3092 
3093 void TemplateTable::putstatic(int byte_no) {
3094   putfield_or_static(byte_no, true);
3095 }
3096 
3097 void TemplateTable::jvmti_post_fast_field_mod()
3098 {
3099   if (JvmtiExport::can_post_field_modification()) {
3100     // Check to see if a field modification watch has been set before
3101     // we take the time to call into the VM.
3102     Label L2;
3103     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3104     __ ldrw(c_rarg3, Address(rscratch1));
3105     __ cbzw(c_rarg3, L2);
3106     __ pop_ptr(r19);                  // copy the object pointer from tos
3107     __ verify_oop(r19);
3108     __ push_ptr(r19);                 // put the object pointer back on tos
3109     // Save tos values before call_VM() clobbers them. Since we have
3110     // to do it for every data type, we use the saved values as the
3111     // jvalue object.
3112     switch (bytecode()) {          // load values into the jvalue object
3113     case Bytecodes::_fast_qputfield: // fall through
3114     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
3115     case Bytecodes::_fast_bputfield: // fall through
3116     case Bytecodes::_fast_zputfield: // fall through
3117     case Bytecodes::_fast_sputfield: // fall through
3118     case Bytecodes::_fast_cputfield: // fall through
3119     case Bytecodes::_fast_iputfield: __ push_i(r0); break;
3120     case Bytecodes::_fast_dputfield: __ push_d(); break;
3121     case Bytecodes::_fast_fputfield: __ push_f(); break;
3122     case Bytecodes::_fast_lputfield: __ push_l(r0); break;
3123 
3124     default:
3125       ShouldNotReachHere();
3126     }
3127     __ mov(c_rarg3, esp);             // points to jvalue on the stack
3128     // access constant pool cache entry
3129     __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
3130     __ verify_oop(r19);
3131     // r19: object pointer copied above
3132     // c_rarg2: cache entry pointer
3133     // c_rarg3: jvalue object on the stack
3134     __ call_VM(noreg,
3135                CAST_FROM_FN_PTR(address,
3136                                 InterpreterRuntime::post_field_modification),
3137                r19, c_rarg2, c_rarg3);
3138 
3139     switch (bytecode()) {             // restore tos values
3140     case Bytecodes::_fast_qputfield: // fall through
3141     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
3142     case Bytecodes::_fast_bputfield: // fall through
3143     case Bytecodes::_fast_zputfield: // fall through
3144     case Bytecodes::_fast_sputfield: // fall through
3145     case Bytecodes::_fast_cputfield: // fall through
3146     case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
3147     case Bytecodes::_fast_dputfield: __ pop_d(); break;
3148     case Bytecodes::_fast_fputfield: __ pop_f(); break;
3149     case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
3150     default: break;
3151     }
3152     __ bind(L2);
3153   }
3154 }
3155 
3156 void TemplateTable::fast_storefield(TosState state)
3157 {
3158   transition(state, vtos);
3159 
3160   ByteSize base = ConstantPoolCache::base_offset();
3161 
3162   jvmti_post_fast_field_mod();
3163 
3164   // access constant pool cache
3165   __ get_cache_and_index_at_bcp(r2, r1, 1);
3166 
3167   // Must prevent reordering of the following cp cache loads with bytecode load
3168   __ membar(MacroAssembler::LoadLoad);
3169 
3170   // test for volatile with r3
3171   __ ldrw(r3, Address(r2, in_bytes(base +
3172                                    ConstantPoolCacheEntry::flags_offset())));
3173 
3174   // replace index with field offset from cache entry
3175   __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3176 
3177   {
3178     Label notVolatile;
3179     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3180     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
3181     __ bind(notVolatile);
3182   }
3183 
3186   // Get object from stack
3187   pop_and_check_object(r2);
3188 
3189   // field address
3190   const Address field(r2, r1);
3191 
3192   // access field
3193   switch (bytecode()) {
3194   case Bytecodes::_fast_qputfield: // fall through
3195    {
3196       Label is_inlined, done;
3197       __ null_check(r0);
3198       __ test_field_is_inlined(r3, noreg /* temp */, is_inlined);
3199       // field is not inlined
3200       do_oop_store(_masm, field, r0, IN_HEAP);
3201       __ b(done);
3202       __ bind(is_inlined);
3203       // field is inlined
3204       __ load_klass(r4, r0);
3205       __ data_for_oop(r0, r0, r4);
3206       __ lea(rscratch1, field);
3207       __ access_value_copy(IN_HEAP, r0, rscratch1, r4);
3208       __ bind(done);
3209     }
3210     break;
3211   case Bytecodes::_fast_aputfield:
3212     do_oop_store(_masm, field, r0, IN_HEAP);
3213     break;
3214   case Bytecodes::_fast_lputfield:
3215     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg);
3216     break;
3217   case Bytecodes::_fast_iputfield:
3218     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg);
3219     break;
3220   case Bytecodes::_fast_zputfield:
3221     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg);
3222     break;
3223   case Bytecodes::_fast_bputfield:
3224     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg);
3225     break;
3226   case Bytecodes::_fast_sputfield:
3227     __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg);
3228     break;
3229   case Bytecodes::_fast_cputfield:
3230     __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg);
3231     break;
3232   case Bytecodes::_fast_fputfield:
3233     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg);
3234     break;
3235   case Bytecodes::_fast_dputfield:
3236     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg);
3237     break;
3238   default:
3239     ShouldNotReachHere();
3240   }
3241 
3242   {
3243     Label notVolatile;
3244     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3245     __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
3246     __ bind(notVolatile);
3247   }
3248 }
3249 
3250 
3251 void TemplateTable::fast_accessfield(TosState state)
3252 {
3253   transition(atos, state);
3254   // Do the JVMTI work here to avoid disturbing the register state below
3255   if (JvmtiExport::can_post_field_access()) {
3256     // Check to see if a field access watch has been set before we
3257     // take the time to call into the VM.
3258     Label L1;
3259     __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3260     __ ldrw(r2, Address(rscratch1));
3261     __ cbzw(r2, L1);
3262     // access constant pool cache entry
3263     __ get_cache_entry_pointer_at_bcp(c_rarg2, rscratch2, 1);
3264     __ verify_oop(r0);
3265     __ push_ptr(r0);  // save object pointer before call_VM() clobbers it
3266     __ mov(c_rarg1, r0);
3267     // c_rarg1: object pointer copied above
3268     // c_rarg2: cache entry pointer
3269     __ call_VM(noreg,
3270                CAST_FROM_FN_PTR(address,
3271                                 InterpreterRuntime::post_field_access),
3272                c_rarg1, c_rarg2);
3273     __ pop_ptr(r0); // restore object pointer
3274     __ bind(L1);
3275   }
3276 
3277   // access constant pool cache
3278   __ get_cache_and_index_at_bcp(r2, r1, 1);
3279 
3280   // Must prevent reordering of the following cp cache loads with bytecode load
3281   __ membar(MacroAssembler::LoadLoad);
3282 
3283   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3284                                   ConstantPoolCacheEntry::f2_offset())));
3285   __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3286                                    ConstantPoolCacheEntry::flags_offset())));
3287 
3288   // r0: object
3289   __ verify_oop(r0);
3290   __ null_check(r0);
3291   const Address field(r0, r1);
3292 
3293   // 8179954: We need to make sure that the code generated for
3294   // volatile accesses forms a sequentially-consistent set of
3295   // operations when combined with STLR and LDAR.  Without a leading
3296   // membar it's possible for a simple Dekker test to fail if loads
3297   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3298   // the stores in one method and we interpret the loads in another.
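  // A hypothetical Dekker-style failure (volatile flags a and b, both
  // initially 0; each thread stores through C2 code but loads through
  // the interpreter):
  //   T1: stlr a, 1 ; ldr b ; dmb   -> may read b == 0
  //   T2: stlr b, 1 ; ldr a ; dmb   -> may read a == 0
  // Both threads reading 0 is possible because a later LDR may be
  // reordered ahead of an earlier STLR; the leading AnyAny barrier
  // below (like LDAR) forbids this.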
3299   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3300     Label notVolatile;
3301     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3302     __ membar(MacroAssembler::AnyAny);
3303     __ bind(notVolatile);
3304   }
3305 
3306   // access field
3307   switch (bytecode()) {
3308   case Bytecodes::_fast_qgetfield:
3309     {
3310       Register index = r4, klass = r5, inline_klass = r6, tmp = r7;
3311       Label is_inlined, nonnull, Done;
3312       __ test_field_is_inlined(r3, noreg /* temp */, is_inlined);
3313         // field is not inlined
3314         __ load_heap_oop(r0, field, rscratch1, rscratch2);
3315         __ cbnz(r0, nonnull);
3316           __ andw(index, r3, ConstantPoolCacheEntry::field_index_mask);
3317           __ ldr(klass, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3318                                              ConstantPoolCacheEntry::f1_offset())));
3319           __ get_inline_type_field_klass(klass, index, inline_klass);
3320           __ get_default_value_oop(inline_klass, tmp /* temp */, r0);
3321         __ bind(nonnull);
3322         __ verify_oop(r0);
3323         __ b(Done);
3324       __ bind(is_inlined);
3325       // field is inlined
3326         __ andw(index, r3, ConstantPoolCacheEntry::field_index_mask);
3327         __ ldr(klass, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3328                                            ConstantPoolCacheEntry::f1_offset())));
3329         __ read_inlined_field(klass, index, r1, tmp /* temp */, r0);
3330         __ verify_oop(r0);
3331       __ bind(Done);
3332     }
3333     break;
3334   case Bytecodes::_fast_agetfield:
3335     do_oop_load(_masm, field, r0, IN_HEAP);
3336     __ verify_oop(r0);
3337     break;
3338   case Bytecodes::_fast_lgetfield:
3339     __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
3340     break;
3341   case Bytecodes::_fast_igetfield:
3342     __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
3343     break;
3344   case Bytecodes::_fast_bgetfield:
3345     __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
3346     break;
3347   case Bytecodes::_fast_sgetfield:
3348     __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
3349     break;
3350   case Bytecodes::_fast_cgetfield:
3351     __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
3352     break;
3353   case Bytecodes::_fast_fgetfield:
3354     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3355     break;
3356   case Bytecodes::_fast_dgetfield:
3357     __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3358     break;
3359   default:
3360     ShouldNotReachHere();
3361   }
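  // A volatile load needs trailing acquire ordering: the
  // LoadLoad|LoadStore barrier below keeps later memory accesses from
  // being hoisted above the field load just performed.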
3362   {
3363     Label notVolatile;
3364     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3365     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3366     __ bind(notVolatile);
3367   }
3368 }
3369 
3370 void TemplateTable::fast_xaccess(TosState state)
3371 {
3372   transition(vtos, state);
3373 
3374   // get receiver
3375   __ ldr(r0, aaddress(0));
3376   // access constant pool cache
3377   __ get_cache_and_index_at_bcp(r2, r3, 2);
3378   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3379                                   ConstantPoolCacheEntry::f2_offset())));
3380 
3381   // 8179954: We need to make sure that the code generated for
3382   // volatile accesses forms a sequentially-consistent set of
3383   // operations when combined with STLR and LDAR.  Without a leading
3384   // membar it's possible for a simple Dekker test to fail if loads
3385   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3386   // the stores in one method and we interpret the loads in another.
3387   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3388     Label notVolatile;
3389     __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3390                                      ConstantPoolCacheEntry::flags_offset())));
3391     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3392     __ membar(MacroAssembler::AnyAny);
3393     __ bind(notVolatile);
3394   }
3395 
3396   // make sure exception is reported in correct bcp range (getfield is
3397   // next instruction)
3398   __ increment(rbcp);
3399   __ null_check(r0);
3400   switch (state) {
3401   case itos:
3402     __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3403     break;
3404   case atos:
3405     do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
3406     __ verify_oop(r0);
3407     break;
3408   case ftos:
3409     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3410     break;
3411   default:
3412     ShouldNotReachHere();
3413   }
3414 
3415   {
3416     Label notVolatile;
3417     __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3418                                      ConstantPoolCacheEntry::flags_offset())));
3419     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3420     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3421     __ bind(notVolatile);
3422   }
3423 
3424   __ decrement(rbcp);
3425 }
3426 
3427 
3428 
3429 //-----------------------------------------------------------------------------
3430 // Calls
3431 
3432 void TemplateTable::prepare_invoke(int byte_no,
3433                                    Register method, // linked method (or i-klass)
3434                                    Register index,  // itable index, MethodType, etc.
3435                                    Register recv,   // if caller wants to see it
3436                                    Register flags   // if caller wants to test it
3437                                    ) {
3438   // determine flags
3439   Bytecodes::Code code = bytecode();
3440   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3441   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3442   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3443   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3444   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3445   const bool load_receiver       = (recv  != noreg);
3446   const bool save_flags          = (flags != noreg);
3447   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3448   assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3449   assert(flags == noreg || flags == r3, "");
3450   assert(recv  == noreg || recv  == r2, "");
3451 
3452   // setup registers & access constant pool cache
3453   if (recv  == noreg)  recv  = r2;
3454   if (flags == noreg)  flags = r3;
3455   assert_different_registers(method, index, recv, flags);
3456 
3457   // save 'interpreter return address'
3458   __ save_bcp();
3459 
3460   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3461 
3462   // maybe push appendix to arguments (just before return address)
3463   if (is_invokedynamic || is_invokehandle) {
3464     Label L_no_push;
3465     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
3466     // Push the appendix as a trailing parameter.
3467     // This must be done before we get the receiver,
3468     // since the parameter_size includes it.
3469     __ push(r19);
3470     __ mov(r19, index);
3471     __ load_resolved_reference_at_index(index, r19);
3472     __ pop(r19);
3473     __ push(index);  // push appendix (MethodType, CallSite, etc.)
3474     __ bind(L_no_push);
3475   }
3476 
3477   // load receiver if needed (note: no return address pushed yet)
3478   if (load_receiver) {
3479     __ andw(recv, flags, ConstantPoolCacheEntry::parameter_size_mask);
3480     // FIXME -- is this actually correct? looks like it should be 2
3481     // const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
3482     // const int receiver_is_at_end      = -1;  // back off one slot to get receiver
3483     // Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3484     // __ movptr(recv, recv_addr);
3485     __ add(rscratch1, esp, recv, ext::uxtx, 3); // FIXME: uxtb here?
3486     __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
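    // The two instructions above compute the receiver slot:
    //   rscratch1 = esp + parameter_size * wordSize
    //   recv      = *(rscratch1 - wordSize)
    // e.g. with a (hypothetical) parameter_size of 2 the receiver is
    // loaded from esp + 8, the deepest argument slot.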
3487     __ verify_oop(recv);
3488   }
3489 
  // compute return type
  // x86 uses a shift and mask, or a shift plus an assert that the
  // mask is not needed; aarch64 just uses a bitfield extract
3493   __ ubfxw(rscratch2, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
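  // rscratch2 now holds the callee's result TosState, extracted from
  // the flags word; it indexes the 8-byte entries of the invoke
  // return entry table loaded below.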
3494   // load return address
3495   {
3496     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3497     __ mov(rscratch1, table_addr);
3498     __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
3499   }
3500 }
3501 
3502 
3503 void TemplateTable::invokevirtual_helper(Register index,
3504                                          Register recv,
3505                                          Register flags)
3506 {
3507   // Uses temporary registers r0, r3
3508   assert_different_registers(index, recv, r0, r3);
3509   // Test for an invoke of a final method
3510   Label notFinal;
3511   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
3512 
3513   const Register method = index;  // method must be rmethod
3514   assert(method == rmethod,
3515          "Method must be rmethod for interpreter calling convention");
3516 
3517   // do the call - the index is actually the method to call
3518   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3519 
3520   // It's final, need a null check here!
3521   __ null_check(recv);
3522 
3523   // profile this call
3524   __ profile_final_call(r0);
3525   __ profile_arguments_type(r0, method, r4, true);
3526 
3527   __ jump_from_interpreted(method, r0);
3528 
3529   __ bind(notFinal);
3530 
3531   // get receiver klass
3532   __ null_check(recv, oopDesc::klass_offset_in_bytes());
3533   __ load_klass(r0, recv);
3534 
3535   // profile this call
3536   __ profile_virtual_call(r0, rlocals, r3);
3537 
3538   // get target Method & entry point
3539   __ lookup_virtual_method(r0, index, method);
3540   __ profile_arguments_type(r3, method, r4, true);
3541   // FIXME -- this looks completely redundant. is it?
3542   // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
3543   __ jump_from_interpreted(method, r3);
3544 }
3545 
3546 void TemplateTable::invokevirtual(int byte_no)
3547 {
3548   transition(vtos, vtos);
3549   assert(byte_no == f2_byte, "use this argument");
3550 
3551   prepare_invoke(byte_no, rmethod, noreg, r2, r3);
3552 
3553   // rmethod: index (actually a Method*)
3554   // r2: receiver
3555   // r3: flags
3556 
3557   invokevirtual_helper(rmethod, r2, r3);
3558 }
3559 
3560 void TemplateTable::invokespecial(int byte_no)
3561 {
3562   transition(vtos, vtos);
3563   assert(byte_no == f1_byte, "use this argument");
3564 
3565   prepare_invoke(byte_no, rmethod, noreg,  // get f1 Method*
3566                  r2);  // get receiver also for null check
3567   __ verify_oop(r2);
3568   __ null_check(r2);
3569   // do the call
3570   __ profile_call(r0);
3571   __ profile_arguments_type(r0, rmethod, rbcp, false);
3572   __ jump_from_interpreted(rmethod, r0);
3573 }
3574 
3575 void TemplateTable::invokestatic(int byte_no)
3576 {
3577   transition(vtos, vtos);
3578   assert(byte_no == f1_byte, "use this argument");
3579 
3580   prepare_invoke(byte_no, rmethod);  // get f1 Method*
3581   // do the call
3582   __ profile_call(r0);
3583   __ profile_arguments_type(r0, rmethod, r4, false);
3584   __ jump_from_interpreted(rmethod, r0);
3585 }
3586 
3587 void TemplateTable::fast_invokevfinal(int byte_no)
3588 {
3589   __ call_Unimplemented();
3590 }
3591 
3592 void TemplateTable::invokeinterface(int byte_no) {
3593   transition(vtos, vtos);
3594   assert(byte_no == f1_byte, "use this argument");
3595 
3596   prepare_invoke(byte_no, r0, rmethod,  // get f1 Klass*, f2 Method*
3597                  r2, r3); // recv, flags
3598 
3599   // r0: interface klass (from f1)
3600   // rmethod: method (from f2)
3601   // r2: receiver
3602   // r3: flags
3603 
3604   // First check for Object case, then private interface method,
3605   // then regular interface method.
3606 
3607   // Special case of invokeinterface called for virtual method of
3608   // java.lang.Object.  See cpCache.cpp for details.
3609   Label notObjectMethod;
3610   __ tbz(r3, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
3611 
3612   invokevirtual_helper(rmethod, r2, r3);
3613   __ bind(notObjectMethod);
3614 
3615   Label no_such_interface;
3616 
3617   // Check for private method invocation - indicated by vfinal
3618   Label notVFinal;
3619   __ tbz(r3, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
3620 
3621   // Get receiver klass into r3 - also a null check
3622   __ null_check(r2, oopDesc::klass_offset_in_bytes());
3623   __ load_klass(r3, r2);
3624 
3625   Label subtype;
3626   __ check_klass_subtype(r3, r0, r4, subtype);
3627   // If we get here the typecheck failed
3628   __ b(no_such_interface);
3629   __ bind(subtype);
3630 
3631   __ profile_final_call(r0);
3632   __ profile_arguments_type(r0, rmethod, r4, true);
3633   __ jump_from_interpreted(rmethod, r0);
3634 
3635   __ bind(notVFinal);
3636 
3637   // Get receiver klass into r3 - also a null check
3638   __ restore_locals();
3639   __ null_check(r2, oopDesc::klass_offset_in_bytes());
3640   __ load_klass(r3, r2);
3641 
3642   Label no_such_method;
3643 
3644   // Preserve method for throw_AbstractMethodErrorVerbose.
3645   __ mov(r16, rmethod);
3646   // Receiver subtype check against REFC.
3647   // Superklass in r0. Subklass in r3. Blows rscratch2, r13
3648   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3649                              r3, r0, noreg,
3650                              // outputs: scan temp. reg, scan temp. reg
3651                              rscratch2, r13,
3652                              no_such_interface,
3653                              /*return_method=*/false);
3654 
3655   // profile this call
3656   __ profile_virtual_call(r3, r13, r19);
3657 
3658   // Get declaring interface class from method, and itable index
3659 
3660   __ load_method_holder(r0, rmethod);
3661   __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
3662   __ subw(rmethod, rmethod, Method::itable_index_max);
3663   __ negw(rmethod, rmethod);
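  // The sub/neg pair computes rmethod = itable_index_max - encoded,
  // recovering the real itable index from the encoded form stored in
  // the Method (presumed encoding: itable_index_max - itable_index).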
3664 
3665   // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3666   __ mov(rlocals, r3);
3667   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3668                              rlocals, r0, rmethod,
3669                              // outputs: method, scan temp. reg
3670                              rmethod, r13,
3671                              no_such_interface);
3672 
  // rmethod: Method to call
3674   // r2: receiver
3675   // Check for abstract method error
3676   // Note: This should be done more efficiently via a throw_abstract_method_error
3677   //       interpreter entry point and a conditional jump to it in case of a null
3678   //       method.
3679   __ cbz(rmethod, no_such_method);
3680 
3681   __ profile_arguments_type(r3, rmethod, r13, true);
3682 
3683   // do the call
3684   // r2: receiver
  // rmethod: Method
3686   __ jump_from_interpreted(rmethod, r3);
3687   __ should_not_reach_here();
3688 
3689   // exception handling code follows...
3690   // note: must restore interpreter registers to canonical
3691   //       state for exception handling to work correctly!
3692 
3693   __ bind(no_such_method);
3694   // throw exception
3695   __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
3696   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3697   // Pass arguments for generating a verbose error message.
3698   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
3699   // the call_VM checks for exception, so we should never return here.
3700   __ should_not_reach_here();
3701 
3702   __ bind(no_such_interface);
3703   // throw exception
3704   __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
3705   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3706   // Pass arguments for generating a verbose error message.
3707   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3708                    InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
3709   // the call_VM checks for exception, so we should never return here.
3710   __ should_not_reach_here();
3711   return;
3712 }
3713 
3714 void TemplateTable::invokehandle(int byte_no) {
3715   transition(vtos, vtos);
3716   assert(byte_no == f1_byte, "use this argument");
3717 
3718   prepare_invoke(byte_no, rmethod, r0, r2);
3719   __ verify_method_ptr(r2);
3720   __ verify_oop(r2);
3721   __ null_check(r2);
3722 
3723   // FIXME: profile the LambdaForm also
3724 
3725   // r13 is safe to use here as a scratch reg because it is about to
3726   // be clobbered by jump_from_interpreted().
3727   __ profile_final_call(r13);
3728   __ profile_arguments_type(r13, rmethod, r4, true);
3729 
3730   __ jump_from_interpreted(rmethod, r0);
3731 }
3732 
3733 void TemplateTable::invokedynamic(int byte_no) {
3734   transition(vtos, vtos);
3735   assert(byte_no == f1_byte, "use this argument");
3736 
3737   prepare_invoke(byte_no, rmethod, r0);
3738 
3739   // r0: CallSite object (from cpool->resolved_references[])
3740   // rmethod: MH.linkToCallSite method (from f2)
3741 
3742   // Note:  r0_callsite is already pushed by prepare_invoke
3743 
3744   // %%% should make a type profile for any invokedynamic that takes a ref argument
3745   // profile this call
3746   __ profile_call(rbcp);
3747   __ profile_arguments_type(r3, rmethod, r13, false);
3748 
3749   __ verify_oop(r0);
3750 
3751   __ jump_from_interpreted(rmethod, r0);
3752 }
3753 
3754 
3755 //-----------------------------------------------------------------------------
3756 // Allocation
3757 
3758 void TemplateTable::_new() {
3759   transition(vtos, atos);
3760 
3761   __ get_unsigned_2_byte_index_at_bcp(r3, 1);
3762   Label slow_case;
3763   Label done;
3764   Label is_not_value;
3765   Label initialize_header;
3766 
3767   __ get_cpool_and_tags(r4, r0);
3768   // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading the InstanceKlass to be consistent with
  // the order in which the Constant Pool is updated (see ConstantPool::klass_at_put)
3771   const int tags_offset = Array<u1>::base_offset_in_bytes();
3772   __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
3773   __ lea(rscratch1, Address(rscratch1, tags_offset));
3774   __ ldarb(rscratch1, rscratch1);
3775   __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
3776   __ br(Assembler::NE, slow_case);
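  // The ldarb above is a load-acquire of the tag byte: it orders the
  // resolved-tag check before the klass load below, pairing with the
  // store order in ConstantPool::klass_at_put.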
3777 
3778   // get InstanceKlass
3779   __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);
3780 
3781   __ ldrb(rscratch1, Address(r4, InstanceKlass::kind_offset()));
3782   __ cmp(rscratch1, (u1)InlineKlassKind);
3783   __ br(Assembler::NE, is_not_value);
3784 
3785   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_InstantiationError));
3786 
3787   __ bind(is_not_value);
3788 
  // make sure klass is fully initialized and doesn't have a finalizer
3791   __ ldrb(rscratch1, Address(r4, InstanceKlass::init_state_offset()));
3792   __ cmp(rscratch1, (u1)InstanceKlass::fully_initialized);
3793   __ br(Assembler::NE, slow_case);
3794 
3795   __ allocate_instance(r4, r0, r3, r1, true, slow_case);
3796   __ b(done);
3797 
3798   // slow case
3799   __ bind(slow_case);
3800   __ get_constant_pool(c_rarg1);
3801   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3802   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3803   __ verify_oop(r0);
3804 
3805   // continue
3806   __ bind(done);
3807   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3808   __ membar(Assembler::StoreStore);
3809 }
3810 
3811 void TemplateTable::aconst_init() {
3812   transition(vtos, atos);
3813   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3814   __ get_constant_pool(c_rarg1);
3815   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::aconst_init),
3816           c_rarg1, c_rarg2);
3817   __ verify_oop(r0);
3818   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3819   __ membar(Assembler::StoreStore);
3820 }
3821 
3822 void TemplateTable::withfield() {
3823   transition(vtos, atos);
3824   resolve_cache_and_index(f2_byte, c_rarg1 /*cache*/, c_rarg2 /*index*/, sizeof(u2));
3825 
3826   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3827 
  // n.b. unlike x86, the cache is now rcpool plus the indexed offset
3829   __ lea(c_rarg1, Address(c_rarg1, in_bytes(cp_base_offset)));
3830 
3831   __ lea(c_rarg2, at_tos());
3832   call_VM(r1, CAST_FROM_FN_PTR(address, InterpreterRuntime::withfield), c_rarg1, c_rarg2);
  // the new inline type is returned in r1
3834   // stack adjustment is returned in r0
3835   __ verify_oop(r1);
3836   __ add(esp, esp, r0);
3837   __ mov(r0, r1);
3838 }
3839 
3840 void TemplateTable::newarray() {
3841   transition(itos, atos);
3842   __ load_unsigned_byte(c_rarg1, at_bcp(1));
3843   __ mov(c_rarg2, r0);
3844   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3845           c_rarg1, c_rarg2);
3846   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3847   __ membar(Assembler::StoreStore);
3848 }
3849 
3850 void TemplateTable::anewarray() {
3851   transition(itos, atos);
3852   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3853   __ get_constant_pool(c_rarg1);
3854   __ mov(c_rarg3, r0);
3855   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3856           c_rarg1, c_rarg2, c_rarg3);
3857   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3858   __ membar(Assembler::StoreStore);
3859 }
3860 
3861 void TemplateTable::arraylength() {
3862   transition(atos, itos);
3863   __ null_check(r0, arrayOopDesc::length_offset_in_bytes());
3864   __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
3865 }
3866 
3867 void TemplateTable::checkcast()
3868 {
3869   transition(atos, atos);
3870   Label done, is_null, ok_is_subtype, quicked, resolved;
3871   __ cbz(r0, is_null);
3872 
3873   // Get cpool & tags index
3874   __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3875   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3876   // See if bytecode has already been quicked
3877   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3878   __ lea(r1, Address(rscratch1, r19));
3879   __ ldarb(r1, r1);
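  // Clear the Q-descriptor bit so that both plain class entries and
  // Q-typed (null-free) entries compare equal to JVM_CONSTANT_Class.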
3880   __ andr(r1, r1, ~JVM_CONSTANT_QDescBit);
3881   __ cmp(r1, (u1)JVM_CONSTANT_Class);
3882   __ br(Assembler::EQ, quicked);
3883 
3884   __ push(atos); // save receiver for result, and for GC
3885   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3886   // vm_result_2 has metadata result
3887   __ get_vm_result_2(r0, rthread);
3888   __ pop(r3); // restore receiver
3889   __ b(resolved);
3890 
3891   // Get superklass in r0 and subklass in r3
3892   __ bind(quicked);
3893   __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
3894   __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
3895 
3896   __ bind(resolved);
3897   __ load_klass(r19, r3);
3898 
3899   // Generate subtype check.  Blows r2, r5.  Object in r3.
3900   // Superklass in r0.  Subklass in r19.
3901   __ gen_subtype_check(r19, ok_is_subtype);
3902 
3903   // Come here on failure
3904   __ push(r3);
3905   // object is at TOS
3906   __ b(Interpreter::_throw_ClassCastException_entry);
3907 
3908   // Come here on success
3909   __ bind(ok_is_subtype);
  __ mov(r0, r3); // Restore object from r3
3911 
3912   __ b(done);
3913   __ bind(is_null);
3914 
3915   // Collect counts on whether this test sees NULLs a lot or not.
3916   if (ProfileInterpreter) {
3917     __ profile_null_seen(r2);
3918   }
3919 
3920   if (EnablePrimitiveClasses) {
3921     // Get cpool & tags index
3922     __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3923     __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
    // See if bytecode has already been quicked
3925     __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3926     __ lea(r1, Address(rscratch1, r19));
3927     __ ldarb(r1, r1);
3928     // See if CP entry is a Q-descriptor
    __ andr(r1, r1, JVM_CONSTANT_QDescBit);
    __ cmp(r1, (u1)JVM_CONSTANT_QDescBit);
3931     __ br(Assembler::NE, done);
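    // We fall through only for a Q-descriptor, i.e. a null-free type;
    // casting null to a null-free type must raise NullPointerException.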
3932     __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
3933   }
3934 
3935   __ bind(done);
3936 }
3937 
3938 void TemplateTable::instanceof() {
3939   transition(atos, itos);
3940   Label done, is_null, ok_is_subtype, quicked, resolved;
3941   __ cbz(r0, is_null);
3942 
3943   // Get cpool & tags index
3944   __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3945   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3946   // See if bytecode has already been quicked
3947   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3948   __ lea(r1, Address(rscratch1, r19));
3949   __ ldarb(r1, r1);
3950   __ andr(r1, r1, ~JVM_CONSTANT_QDescBit);
3951   __ cmp(r1, (u1)JVM_CONSTANT_Class);
3952   __ br(Assembler::EQ, quicked);
3953 
3954   __ push(atos); // save receiver for result, and for GC
3955   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3956   // vm_result_2 has metadata result
3957   __ get_vm_result_2(r0, rthread);
3958   __ pop(r3); // restore receiver
3959   __ verify_oop(r3);
3960   __ load_klass(r3, r3);
3961   __ b(resolved);
3962 
3963   // Get superklass in r0 and subklass in r3
3964   __ bind(quicked);
3965   __ load_klass(r3, r0);
3966   __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);
3967 
3968   __ bind(resolved);
3969 
3970   // Generate subtype check.  Blows r2, r5
3971   // Superklass in r0.  Subklass in r3.
3972   __ gen_subtype_check(r3, ok_is_subtype);
3973 
3974   // Come here on failure
3975   __ mov(r0, 0);
3976   __ b(done);
3977   // Come here on success
3978   __ bind(ok_is_subtype);
3979   __ mov(r0, 1);
3980 
3981   // Collect counts on whether this test sees NULLs a lot or not.
3982   if (ProfileInterpreter) {
3983     __ b(done);
3984     __ bind(is_null);
3985     __ profile_null_seen(r2);
3986   } else {
3987     __ bind(is_null);   // same as 'done'
3988   }
3989   __ bind(done);
  // r0 = 0: obj == NULL or  obj is not an instance of the specified klass
  // r0 = 1: obj != NULL and obj is     an instance of the specified klass
3992 }
3993 
3994 //-----------------------------------------------------------------------------
3995 // Breakpoints
3996 void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.
4000 
4001   transition(vtos, vtos);
4002 
4003   // get the unpatched byte code
4004   __ get_method(c_rarg1);
4005   __ call_VM(noreg,
4006              CAST_FROM_FN_PTR(address,
4007                               InterpreterRuntime::get_original_bytecode_at),
4008              c_rarg1, rbcp);
4009   __ mov(r19, r0);
4010 
4011   // post the breakpoint event
4012   __ call_VM(noreg,
4013              CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4014              rmethod, rbcp);
4015 
4016   // complete the execution of original bytecode
4017   __ mov(rscratch1, r19);
4018   __ dispatch_only_normal(vtos);
4019 }
4020 
4021 //-----------------------------------------------------------------------------
4022 // Exceptions
4023 
4024 void TemplateTable::athrow() {
4025   transition(atos, vtos);
4026   __ null_check(r0);
4027   __ b(Interpreter::throw_exception_entry());
4028 }
4029 
4030 //-----------------------------------------------------------------------------
4031 // Synchronization
4032 //
// Note: monitorenter & exit are symmetric routines, which is reflected
4034 //       in the assembly code structure as well
4035 //
4036 // Stack layout:
4037 //
4038 // [expressions  ] <--- esp               = expression stack top
4039 // ..
4040 // [expressions  ]
4041 // [monitor entry] <--- monitor block top = expression stack bot
4042 // ..
4043 // [monitor entry]
4044 // [frame data   ] <--- monitor block bot
4045 // ...
4046 // [saved rfp    ] <--- rfp
4047 void TemplateTable::monitorenter()
4048 {
4049   transition(atos, vtos);
4050 
4051   // check for NULL object
4052   __ null_check(r0);
4053 
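  // Inline types have no identity and cannot be locked; detect them by
  // their mark-word pattern and throw IllegalMonitorStateException
  // (the is_inline_type path at the end of this method).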
4054   Label is_inline_type;
4055   __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
4056   __ test_markword_is_inline_type(rscratch1, is_inline_type);
4057 
4058   const Address monitor_block_top(
4059         rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4060   const Address monitor_block_bot(
4061         rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
4062   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4063 
4064   Label allocated;
4065 
4066   // initialize entry pointer
4067   __ mov(c_rarg1, zr); // points to free slot or NULL
4068 
4069   // find a free slot in the monitor block (result in c_rarg1)
4070   {
4071     Label entry, loop, exit;
4072     __ ldr(c_rarg3, monitor_block_top); // points to current entry,
4073                                         // starting with top-most entry
4074     __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
4075 
4076     __ b(entry);
4077 
4078     __ bind(loop);
4079     // check if current entry is used
4080     // if not used then remember entry in c_rarg1
4081     __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
4082     __ cmp(zr, rscratch1);
4083     __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
4084     // check if current entry is for same object
4085     __ cmp(r0, rscratch1);
4086     // if same object then stop searching
4087     __ br(Assembler::EQ, exit);
4088     // otherwise advance to next entry
4089     __ add(c_rarg3, c_rarg3, entry_size);
4090     __ bind(entry);
4091     // check if bottom reached
4092     __ cmp(c_rarg3, c_rarg2);
4093     // if not at bottom then check this entry
4094     __ br(Assembler::NE, loop);
4095     __ bind(exit);
4096   }
4097 
  __ cbnz(c_rarg1, allocated); // check if a slot has been found;
                               // if found, continue with that one
4100 
4101   // allocate one if there's no free slot
4102   {
4103     Label entry, loop;
4104     // 1. compute new pointers            // rsp: old expression stack top
4105 
4106     __ check_extended_sp();
4107     __ sub(sp, sp, entry_size);           // make room for the monitor
4108     __ mov(rscratch1, sp);
4109     __ str(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));
4110 
4111     __ ldr(c_rarg1, monitor_block_bot);   // c_rarg1: old expression stack bottom
4112     __ sub(esp, esp, entry_size);         // move expression stack top
4113     __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
4114     __ mov(c_rarg3, esp);                 // set start value for copy loop
4115     __ str(c_rarg1, monitor_block_bot);   // set new monitor block bottom
4116 
4117     __ b(entry);
4118     // 2. move expression stack contents
4119     __ bind(loop);
4120     __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
4121                                                    // word from old location
4122     __ str(c_rarg2, Address(c_rarg3, 0));          // and store it at new location
4123     __ add(c_rarg3, c_rarg3, wordSize);            // advance to next word
4124     __ bind(entry);
4125     __ cmp(c_rarg3, c_rarg1);        // check if bottom reached
4126     __ br(Assembler::NE, loop);      // if not at bottom then
4127                                      // copy next word
4128   }
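  // On exit the expression stack has been shifted down by entry_size
  // and c_rarg1 points at the vacated slot, which becomes the new
  // monitor entry used at 'allocated' below.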
4129 
4130   // call run-time routine
4131   // c_rarg1: points to monitor entry
4132   __ bind(allocated);
4133 
  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
4136   // The object has already been popped from the stack, so the
4137   // expression stack looks correct.
4138   __ increment(rbcp);
4139 
4140   // store object
4141   __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
4142   __ lock_object(c_rarg1);
4143 
4144   // check to make sure this monitor doesn't cause stack overflow after locking
4145   __ save_bcp();  // in case of exception
4146   __ generate_stack_overflow_check(0);
4147 
4148   // The bcp has already been incremented. Just need to dispatch to
4149   // next instruction.
4150   __ dispatch_next(vtos);
4151 
4152   __ bind(is_inline_type);
4153   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4154                     InterpreterRuntime::throw_illegal_monitor_state_exception));
4155   __ should_not_reach_here();
4156 }
4157 
4158 
4159 void TemplateTable::monitorexit()
4160 {
4161   transition(atos, vtos);
4162 
4163   // check for NULL object
4164   __ null_check(r0);
4165 
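  // Inline types have no identity and can never have been locked;
  // check the full inline-type pattern in the mark word and throw
  // IllegalMonitorStateException if it matches.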
4166   const int is_inline_type_mask = markWord::inline_type_pattern;
4167   Label has_identity;
4168   __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
4169   __ mov(rscratch2, is_inline_type_mask);
4170   __ andr(rscratch1, rscratch1, rscratch2);
4171   __ cmp(rscratch1, rscratch2);
4172   __ br(Assembler::NE, has_identity);
4173   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4174                      InterpreterRuntime::throw_illegal_monitor_state_exception));
4175   __ should_not_reach_here();
4176   __ bind(has_identity);
4177 
4178   const Address monitor_block_top(
4179         rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4180   const Address monitor_block_bot(
4181         rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
4182   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4183 
4184   Label found;
4185 
4186   // find matching slot
4187   {
4188     Label entry, loop;
4189     __ ldr(c_rarg1, monitor_block_top); // points to current entry,
4190                                         // starting with top-most entry
4191     __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
4192                                         // of monitor block
4193     __ b(entry);
4194 
4195     __ bind(loop);
4196     // check if current entry is for same object
4197     __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
4198     __ cmp(r0, rscratch1);
4199     // if same object then stop searching
4200     __ br(Assembler::EQ, found);
4201     // otherwise advance to next entry
4202     __ add(c_rarg1, c_rarg1, entry_size);
4203     __ bind(entry);
4204     // check if bottom reached
4205     __ cmp(c_rarg1, c_rarg2);
4206     // if not at bottom then check this entry
4207     __ br(Assembler::NE, loop);
4208   }
4209 
  // Error handling: unlocking was not block-structured
4211   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4212                    InterpreterRuntime::throw_illegal_monitor_state_exception));
4213   __ should_not_reach_here();
4214 
4215   // call run-time routine
4216   __ bind(found);
4217   __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
4218   __ unlock_object(c_rarg1);
4219   __ pop_ptr(r0); // discard object
4220 }
4221 
4222 
4223 // Wide instructions
4224 void TemplateTable::wide()
4225 {
4226   __ load_unsigned_byte(r19, at_bcp(1));
4227   __ mov(rscratch1, (address)Interpreter::_wentry_point);
4228   __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
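  // Address::uxtw(3) scales the operand bytecode by 8 to index the
  // 8-byte entries of the _wentry_point table; the selected entry
  // then handles the widened (2-byte) operand itself.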
4229   __ br(rscratch1);
4230 }
4231 
4232 
4233 // Multi arrays
4234 void TemplateTable::multianewarray() {
4235   transition(vtos, atos);
4236   __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
4237   // last dim is on top of stack; we want address of first one:
4238   // first_addr = last_addr + (ndims - 1) * wordSize
4239   __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
4240   __ sub(c_rarg1, c_rarg1, wordSize);
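  // Worked example: with 3 dimensions, c_rarg1 = esp + 3*8 - 8
  // = esp + 16, the slot of the first dimension; the remaining
  // dimensions sit at successively lower addresses down to esp.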
4241   call_VM(r0,
4242           CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
4243           c_rarg1);
4244   __ load_unsigned_byte(r1, at_bcp(3));
4245   __ lea(esp, Address(esp, r1, Address::uxtw(3)));
4246 }