/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/powerOfTwo.hpp"

#define __ _masm->
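// Shorthand used throughout the template generators below:
// '__ insn(...)' expands to '_masm->insn(...)'.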

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::lsl(3));
}
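
// Note on the register-indexed forms: locals occupy one 8-byte slot each
// and the locals area grows toward lower addresses, so locals_index()
// (below) negates the raw index first; Address(rlocals, r, lsl(3)) then
// resolves to rlocals - 8*index.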

static inline Address laddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
  return Address(scratch, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r, Register scratch,
                               InterpreterMacroAssembler* _masm) {
  return laddress(r, scratch, _masm);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(esp, 0);
}

// At the top of the Java expression stack, which may be different from
// esp().  It isn't for category 1 values.
static inline Address at_tos   () {
  return Address(esp,  Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(3));
}

static inline Address at_tos_p4() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(4));
}

static inline Address at_tos_p5() {
  return Address(esp,  Interpreter::expr_offset_in_bytes(5));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::NE;
  case TemplateTable::not_equal    : return Assembler::EQ;
  case TemplateTable::less         : return Assembler::GE;
  case TemplateTable::less_equal   : return Assembler::GT;
  case TemplateTable::greater      : return Assembler::LE;
  case TemplateTable::greater_equal: return Assembler::LT;
  }
  ShouldNotReachHere();
  return Assembler::EQ;
}
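
// j_not returns the *negation* of the Java condition: the branch templates
// jump over the taken-branch code when the test fails, e.g. if_icmpeq is
// compiled as a conditional branch on NE to the not-taken label.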


// Miscellaneous helper routines
// Store an oop (or NULL) at the Address described by dst.
// If val == noreg this means store a NULL
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators) {
  assert(val == noreg || val == r0, "parameter is just for looks");
  __ store_heap_oop(dst, val, r10, r1, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators) {
  __ load_heap_oop(dst, src, r10, r1, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no)
{
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movw(bc_reg, bc);
      __ cbzw(temp_reg, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movw(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ load_unsigned_byte(temp_reg, at_bcp(0));
    __ cmpw(temp_reg, Bytecodes::_breakpoint);
    __ br(Assembler::NE, L_fast_patch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
  __ br(Assembler::EQ, L_okay);
  __ cmpw(temp_reg, bc_reg);
  __ br(Assembler::EQ, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ strb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null()
{
  transition(vtos, atos);
  __ mov(r0, 0);
}

void TemplateTable::iconst(int value)
{
  transition(vtos, itos);
  __ mov(r0, value);
}

void TemplateTable::lconst(int value)
{
  transition(vtos, ltos);
  __ mov(r0, value);
}

void TemplateTable::fconst(int value)
{
  transition(vtos, ftos);
  switch (value) {
  case 0:
    __ fmovs(v0, 0.0);
    break;
  case 1:
    __ fmovs(v0, 1.0);
    break;
  case 2:
    __ fmovs(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value)
{
  transition(vtos, dtos);
  switch (value) {
  case 0:
    __ fmovd(v0, 0.0);
    break;
  case 1:
    __ fmovd(v0, 1.0);
    break;
  case 2:
    __ fmovd(v0, 2.0);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush()
{
  transition(vtos, itos);
  __ load_signed_byte32(r0, at_bcp(1));
}

void TemplateTable::sipush()
{
  transition(vtos, itos);
  __ load_unsigned_short(r0, at_bcp(1));
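  // The two operand bytes sit big-endian in the bytecode stream: revw
  // byte-reverses the loaded word, leaving the operand in the top
  // halfword, and the arithmetic shift brings it back down sign-extended.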
  __ revw(r0, r0);
  __ asrw(r0, r0, 16);
}

void TemplateTable::ldc(bool wide)
{
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(r1, 1);
  } else {
    __ load_unsigned_byte(r1, at_bcp(1));
  }
  __ get_cpool_and_tags(r2, r0);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ add(r3, r1, tags_offset);
  __ lea(r3, Address(r0, r3));
  __ ldarb(r3, r3);
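  // The load-acquire above pairs with the release store that publishes a
  // resolved tag, so once we observe a resolved tag the corresponding
  // constant pool entry is visible too.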

  // unresolved class - get the resolved class
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
  __ br(Assembler::EQ, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
  __ br(Assembler::EQ, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmp(r3, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, notClass);

  __ bind(call_ldc);
  __ mov(c_rarg1, wide);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(r0);
  __ verify_oop(r0);
  __ b(Done);

  __ bind(notClass);
  __ cmp(r3, (u1)JVM_CONSTANT_Float);
  __ br(Assembler::NE, notFloat);
  // ftos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrs(v0, Address(r1, base_offset));
  __ push_f();
  __ b(Done);

  __ bind(notFloat);

  __ cmp(r3, (u1)JVM_CONSTANT_Integer);
  __ br(Assembler::NE, notInt);

  // itos
  __ adds(r1, r2, r1, Assembler::LSL, 3);
  __ ldrw(r0, Address(r1, base_offset));
  __ push_i(r0);
  __ b(Done);

  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide)
{
  transition(vtos, atos);

  Register result = r0;
  Register tmp = r1;
  Register rarg = r2;

  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ cbnz(result, resolved);
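  // Note: a constant that genuinely resolves to null is cached as
  // Universe::the_null_sentinel() rather than as null, so cbnz above
  // cleanly distinguishes "not yet resolved" from "resolved to null";
  // the sentinel is mapped back to null further down.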

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ mov(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);

  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;

    // Stash null_sentinel address to get its value later
    __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
    __ ldr(tmp, Address(rarg));
    __ resolve_oop_handle(tmp);
    __ cmpoop(result, tmp);
    __ br(Assembler::NE, notNull);
    __ mov(result, 0);  // NULL object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    // Safe to call with 0 result
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w()
{
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(r0, 1);

  __ get_cpool_and_tags(r1, r2);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ lea(r2, Address(r2, r0, Address::lsl(0)));
  __ load_unsigned_byte(r2, Address(r2, tags_offset));
  __ cmpw(r2, (int)JVM_CONSTANT_Double);
  __ br(Assembler::NE, notDouble);

  // dtos
  __ lea(r2, Address(r1, r0, Address::lsl(3)));
  __ ldrd(v0, Address(r2, base_offset));
  __ push_d();
  __ b(Done);

  __ bind(notDouble);
  __ cmpw(r2, (int)JVM_CONSTANT_Long);
  __ br(Assembler::NE, notLong);

  // ltos
  __ lea(r0, Address(r1, r0, Address::lsl(3)));
  __ ldr(r0, Address(r0, base_offset));
  __ push_l();
  __ b(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done)
{
  Register obj = r0;
  Register rarg = r1;
  Register flags = r2;
  Register off = r3;

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  __ mov(rarg, (int) bytecode());
  __ call_VM(obj, entry, rarg);

  __ get_vm_result_2(flags, rthread);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ mov(off, flags);
  __ andw(off, off, ConstantPoolCacheEntry::field_index_mask);

  const Address field(obj, off);

  // What sort of thing are we loading?
  // x86 uses a shift and mask, or wings it with a shift plus an assert
  // that the mask is not needed; aarch64 just uses a bitfield extract.
  __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,
           ConstantPoolCacheEntry::tos_state_bits);

  switch (bytecode()) {
    case Bytecodes::_ldc:
    case Bytecodes::_ldc_w:
      {
        // tos in (itos, ftos, stos, btos, ctos, ztos)
        Label notInt, notFloat, notShort, notByte, notChar, notBool;
        __ cmpw(flags, itos);
        __ br(Assembler::NE, notInt);
        // itos
        __ ldrw(r0, field);
        __ push(itos);
        __ b(Done);

        __ bind(notInt);
        __ cmpw(flags, ftos);
        __ br(Assembler::NE, notFloat);
        // ftos
        __ load_float(field);
        __ push(ftos);
        __ b(Done);

        __ bind(notFloat);
        __ cmpw(flags, stos);
        __ br(Assembler::NE, notShort);
        // stos
        __ load_signed_short(r0, field);
        __ push(stos);
        __ b(Done);

        __ bind(notShort);
        __ cmpw(flags, btos);
        __ br(Assembler::NE, notByte);
        // btos
        __ load_signed_byte(r0, field);
        __ push(btos);
        __ b(Done);

        __ bind(notByte);
        __ cmpw(flags, ctos);
        __ br(Assembler::NE, notChar);
        // ctos
        __ load_unsigned_short(r0, field);
        __ push(ctos);
        __ b(Done);

        __ bind(notChar);
        __ cmpw(flags, ztos);
        __ br(Assembler::NE, notBool);
        // ztos
        __ load_signed_byte(r0, field);
        __ push(ztos);
        __ b(Done);

        __ bind(notBool);
        break;
      }

    case Bytecodes::_ldc2_w:
      {
        Label notLong, notDouble;
        __ cmpw(flags, ltos);
        __ br(Assembler::NE, notLong);
        // ltos
        __ ldr(r0, field);
        __ push(ltos);
        __ b(Done);

        __ bind(notLong);
        __ cmpw(flags, dtos);
        __ br(Assembler::NE, notDouble);
        // dtos
        __ load_double(field);
        __ push(dtos);
        __ b(Done);

        __ bind(notDouble);
        break;
      }

    default:
      ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

void TemplateTable::locals_index(Register reg, int offset)
{
  __ ldrb(reg, at_bcp(offset));
  __ neg(reg, reg);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // if _iload, wait to rewrite to _fast_iload2.  We only want to rewrite
    // the last two iloads in a pair.  The next bytecode being _fast_iload
    // means the one after it is neither an iload nor a caload, so the
    // current pair is the last one and can become _fast_iload2.
    __ cmpw(r1, Bytecodes::_iload);
    __ br(Assembler::EQ, done);

    // if _fast_iload rewrite to _fast_iload2
    __ cmpw(r1, Bytecodes::_fast_iload);
    __ movw(bc, Bytecodes::_fast_iload2);
    __ br(Assembler::EQ, rewrite);

    // if _caload rewrite to _fast_icaload
    __ cmpw(r1, Bytecodes::_caload);
    __ movw(bc, Bytecodes::_fast_icaload);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_iload
    __ movw(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, r1, false);
    __ bind(done);
  }

  // do iload, get the local value into tos
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload2()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
  __ push(itos);
  locals_index(r1, 3);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload()
{
  transition(vtos, itos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::lload()
{
  transition(vtos, ltos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
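  // The sub computes rlocals - index*8 directly (no separate neg as in
  // locals_index()), and local_offset_in_bytes(1) below addresses the
  // second of the long's two slots, where the 64-bit value lives.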
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::fload()
{
  transition(vtos, ftos);
  locals_index(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::dload()
{
  transition(vtos, dtos);
  __ ldrb(r1, at_bcp(1));
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::aload()
{
  transition(vtos, atos);
  locals_index(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ ldrh(reg, at_bcp(2));
  __ rev16w(reg, reg);
  __ neg(reg, reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(r1);
  __ ldr(r0, iaddress(r1));
}

void TemplateTable::wide_lload()
{
  transition(vtos, ltos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_fload()
{
  transition(vtos, ftos);
  locals_index_wide(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}

void TemplateTable::wide_dload()
{
  transition(vtos, dtos);
  __ ldrh(r1, at_bcp(2));
  __ rev16w(r1, r1);
  __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
  __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
}

void TemplateTable::wide_aload()
{
  transition(vtos, atos);
  locals_index_wide(r1);
  __ ldr(r0, aaddress(r1));
}

void TemplateTable::index_check(Register array, Register index)
{
  // destroys r1, rscratch1
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  // __ movl2ptr(index, index);
  // check index
  Register length = rscratch1;
  __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ cmpw(index, length);
  if (index != r1) {
    // ??? convention: move aberrant index into r1 for exception message
    assert(r1 != array, "different registers");
    __ mov(r1, index);
  }
  Label ok;
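  // A single unsigned compare-and-branch implements the full
  // 0 <= index < length check: a negative index shows up as a very
  // large unsigned value, so LO fails for it.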
  __ br(Assembler::LO, ok);
  // ??? convention: move array into r3 for exception message
  __ mov(r3, array);
  __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  __ br(rscratch1);
  __ bind(ok);
}

void TemplateTable::iaload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
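  // r1 now holds index + base_offset/element_size, so the scaled access
  // below yields array + index*4 + base_offset in a single addressing
  // mode.  All the array templates below use the same folding trick.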
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::laload()
{
  transition(itos, ltos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::faload()
{
  transition(itos, ftos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}

void TemplateTable::daload()
{
  transition(itos, dtos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}

void TemplateTable::aaload()
{
  transition(itos, atos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  do_oop_load(_masm,
              Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
              r0,
              IS_ARRAY);
}

void TemplateTable::baload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}

void TemplateTable::caload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload()
{
  transition(vtos, itos);
  // load index out of locals
  locals_index(r2);
  __ ldr(r1, iaddress(r2));

  __ pop_ptr(r0);

  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::saload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}

void TemplateTable::iload(int n)
{
  transition(vtos, itos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::lload(int n)
{
  transition(vtos, ltos);
  __ ldr(r0, laddress(n));
}

void TemplateTable::fload(int n)
{
  transition(vtos, ftos);
  __ ldrs(v0, faddress(n));
}

void TemplateTable::dload(int n)
{
  transition(vtos, dtos);
  __ ldrd(v0, daddress(n));
}

void TemplateTable::aload(int n)
{
  transition(vtos, atos);
  __ ldr(r0, iaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpw(r1, Bytecodes::_getfield);
    __ br(Assembler::EQ, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_igetfield);
    __ movw(bc, Bytecodes::_fast_iaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_agetfield);
    __ movw(bc, Bytecodes::_fast_aaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_fgetfield);
    __ movw(bc, Bytecodes::_fast_faccess_0);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movw(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, r1, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore()
{
  transition(itos, vtos);
  locals_index(r1);
  // FIXME: We're being very pernickety here storing a jint in a
  // local with strw, which costs an extra instruction over what we'd
  // be able to do with a simple str.  We should just store the whole
  // word.
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::lstore()
{
  transition(ltos, vtos);
  locals_index(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::astore()
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(r1);
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(r1);
  __ str(r0, laddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(r1);
  __ lea(rscratch1, faddress(r1));
  __ strs(v0, Address(rscratch1));
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(r1);
  __ strd(v0, daddress(r1, rscratch1, _masm));
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(r0);
  locals_index_wide(r1);
  __ str(r0, aaddress(r1));
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ ldr(r0, at_tos());    // value
  __ ldr(r2, at_tos_p1()); // index
  __ ldr(r3, at_tos_p2()); // array

  Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));

  index_check(r3, r2);     // kills r1
  __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);

  // do array store check - check for NULL value first
  __ cbz(r0, is_null);

  // Move subklass into r1
  __ load_klass(r1, r0);
  // Move superklass into r0
  __ load_klass(r0, r3);
  __ ldr(r0, Address(r0,
                     ObjArrayKlass::element_klass_offset()));
  // The element address (array + index*oopSize + header) was compressed
  // into the single register r4 above.  Frees r2.

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r1.
  __ gen_subtype_check(r1, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store (r0 was clobbered by the klass loads
  // above; the operand stack still holds it)
  __ ldr(r0, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, r0, IS_ARRAY);
  __ b(done);

  // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(r2);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);

  // Pop stack arguments
  __ bind(done);
  __ add(esp, esp, 3 * Interpreter::stackElementSize);
}
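
// Note: aastore reads its operands with at_tos()/at_tos_p1()/at_tos_p2()
// and only pops them at the very end, so the failure path above still has
// the offending object at TOS, as the ArrayStoreException entry expects.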

void TemplateTable::bastore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(r2, r3);
  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
  int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
  Label L_skip;
  __ tbz(r2, diffbit_index, L_skip);
  __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg);
}

void TemplateTable::castore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg);
}

void TemplateTable::sastore()
{
  castore();
}
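
// sastore can simply reuse castore: chars and shorts are both 16-bit
// element stores, and signedness only matters on the load side.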

void TemplateTable::istore(int n)
{
  transition(itos, vtos);
  __ str(r0, iaddress(n));
}

void TemplateTable::lstore(int n)
{
  transition(ltos, vtos);
  __ str(r0, laddress(n));
}

void TemplateTable::fstore(int n)
{
  transition(ftos, vtos);
  __ strs(v0, faddress(n));
}

void TemplateTable::dstore(int n)
{
  transition(dtos, vtos);
  __ strd(v0, daddress(n));
}

void TemplateTable::astore(int n)
{
  transition(vtos, vtos);
  __ pop_ptr(r0);
  __ str(r0, iaddress(n));
}

void TemplateTable::pop()
{
  transition(vtos, vtos);
  __ add(esp, esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2()
{
  transition(vtos, vtos);
  __ add(esp, esp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup()
{
  transition(vtos, vtos);
  __ ldr(r0, Address(esp, 0));
  __ push(r0);
  // stack: ..., a, a
}

void TemplateTable::dup_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos());  // load b
  __ ldr(r2, at_tos_p1());  // load a
  __ str(r0, at_tos_p1());  // store b
  __ str(r2, at_tos());  // store a
  __ push(r0);                  // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r0, at_tos());  // load c
  __ ldr(r2, at_tos_p2());  // load a
  __ str(r0, at_tos_p2());  // store c in a
  __ push(r0);      // push c
  // stack: ..., c, b, c, c
  __ ldr(r0, at_tos_p2());  // load b
  __ str(r2, at_tos_p2());  // store a in b
  // stack: ..., c, a, c, c
  __ str(r0, at_tos_p1());  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos_p1());  // load a
  __ push(r0);                  // push a
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);                  // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r2, at_tos());  // load c
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);                  // push b
  __ push(r2);                  // push c
  // stack: ..., a, b, c, b, c
  __ str(r2, at_tos_p3());  // store c in b
  // stack: ..., a, c, c, b, c
  __ ldr(r2, at_tos_p4());  // load a
  __ str(r2, at_tos_p2());  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ str(r0, at_tos_p4());  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ ldr(r2, at_tos());  // load d
  __ ldr(r0, at_tos_p1());  // load c
  __ push(r0);                  // push c
  __ push(r2);                  // push d
  // stack: ..., a, b, c, d, c, d
  __ ldr(r0, at_tos_p4());  // load b
  __ str(r0, at_tos_p2());  // store b in d
  __ str(r2, at_tos_p4());  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ ldr(r2, at_tos_p5());  // load a
  __ ldr(r0, at_tos_p3());  // load c
  __ str(r2, at_tos_p3());  // store a in c
  __ str(r0, at_tos_p5());  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r2, at_tos_p1());  // load a
  __ ldr(r0, at_tos());  // load b
  __ str(r2, at_tos());  // store a in b
  __ str(r0, at_tos_p1());  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op)
{
  transition(itos, itos);
  // r0 <== r1 op r0
  __ pop_i(r1);
  switch (op) {
  case add  : __ addw(r0, r1, r0); break;
  case sub  : __ subw(r0, r1, r0); break;
  case mul  : __ mulw(r0, r1, r0); break;
  case _and : __ andw(r0, r1, r0); break;
  case _or  : __ orrw(r0, r1, r0); break;
  case _xor : __ eorw(r0, r1, r0); break;
  case shl  : __ lslvw(r0, r1, r0); break;
  case shr  : __ asrvw(r0, r1, r0); break;
  case ushr : __ lsrvw(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}
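
// Note: no explicit masking of the shift count is needed above: the
// variable-shift instructions (lslvw/asrvw/lsrvw) use the count modulo
// the register width, which matches the JLS rule that int shifts use
// only the low five bits of the count.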

void TemplateTable::lop2(Operation op)
{
  transition(ltos, ltos);
  // r0 <== r1 op r0
  __ pop_l(r1);
  switch (op) {
  case add  : __ add(r0, r1, r0); break;
  case sub  : __ sub(r0, r1, r0); break;
  case mul  : __ mul(r0, r1, r0); break;
  case _and : __ andr(r0, r1, r0); break;
  case _or  : __ orr(r0, r1, r0); break;
  case _xor : __ eor(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 idiv r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
}
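
// The explicit zero checks in idiv/irem/ldiv/lrem are the only way to
// raise ArithmeticException: AArch64 sdiv does not trap on division by
// zero (it just returns zero), so there is no hardware fault to
// piggyback on.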

void TemplateTable::irem()
{
  transition(itos, itos);
  // explicitly check for div0
  Label no_div0;
  __ cbnzw(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_i(r1);
  // r0 <== r1 irem r0
  __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lmul()
{
  transition(ltos, ltos);
  __ pop_l(r1);
  __ mul(r0, r0, r1);
}

void TemplateTable::ldiv()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 ldiv r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
}

void TemplateTable::lrem()
{
  transition(ltos, ltos);
  // explicitly check for div0
  Label no_div0;
  __ cbnz(r0, no_div0);
  __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
  __ br(rscratch1);
  __ bind(no_div0);
  __ pop_l(r1);
  // r0 <== r1 lrem r0
  __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
}

void TemplateTable::lshl()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lslv(r0, r1, r0);
}

void TemplateTable::lshr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ asrv(r0, r1, r0);
}

void TemplateTable::lushr()
{
  transition(itos, ltos);
  // shift count is in r0
  __ pop_l(r1);
  __ lsrv(r0, r1, r0);
}
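
// As with the int shifts in iop2, the 64-bit lslv/asrv/lsrv take the
// count modulo 64, which is exactly the JLS masking rule for long shifts.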

void TemplateTable::fop2(Operation op)
{
  transition(ftos, ftos);
  switch (op) {
  case add:
    // n.b. the operand occupies a full 64 bit stack slot; pop_f
    // extracts just the 32 bit float
    __ pop_f(v1);
    __ fadds(v0, v1, v0);
    break;
  case sub:
    __ pop_f(v1);
    __ fsubs(v0, v1, v0);
    break;
  case mul:
    __ pop_f(v1);
    __ fmuls(v0, v1, v0);
    break;
  case div:
    __ pop_f(v1);
    __ fdivs(v0, v1, v0);
    break;
  case rem:
    __ fmovs(v1, v0);
    __ pop_f(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op)
{
  transition(dtos, dtos);
  switch (op) {
  case add:
    // n.b. the double fills its 64 bit slot, so pop_d loads it directly
    __ pop_d(v1);
    __ faddd(v0, v1, v0);
    break;
  case sub:
    __ pop_d(v1);
    __ fsubd(v0, v1, v0);
    break;
  case mul:
    __ pop_d(v1);
    __ fmuld(v0, v1, v0);
    break;
  case div:
    __ pop_d(v1);
    __ fdivd(v0, v1, v0);
    break;
  case rem:
    __ fmovd(v1, v0);
    __ pop_d(v0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg()
{
  transition(itos, itos);
  __ negw(r0, r0);
}

void TemplateTable::lneg()
{
  transition(ltos, ltos);
  __ neg(r0, r0);
}

void TemplateTable::fneg()
{
  transition(ftos, ftos);
  __ fnegs(v0, v0);
}

void TemplateTable::dneg()
{
  transition(dtos, dtos);
  __ fnegd(v0, v0);
}

void TemplateTable::iinc()
{
  transition(vtos, vtos);
  __ load_signed_byte(r1, at_bcp(2)); // get constant
  locals_index(r2);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::wide_iinc()
{
  transition(vtos, vtos);
  // __ mov(r1, zr);
  __ ldrw(r1, at_bcp(2)); // get constant and index
  __ rev16(r1, r1);
  __ ubfx(r2, r1, 0, 16);
  __ neg(r2, r2);
  __ sbfx(r1, r1, 16, 16);
  __ ldr(r0, iaddress(r2));
  __ addw(r0, r0, r1);
  __ str(r0, iaddress(r2));
}

void TemplateTable::convert()
{
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT
  // static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ sxtw(r0, r0);
    break;
  case Bytecodes::_i2f:
    __ scvtfws(v0, r0);
    break;
  case Bytecodes::_i2d:
    __ scvtfwd(v0, r0);
    break;
  case Bytecodes::_i2b:
    __ sxtbw(r0, r0);
    break;
  case Bytecodes::_i2c:
    __ uxthw(r0, r0);
    break;
  case Bytecodes::_i2s:
    __ sxthw(r0, r0);
    break;
  case Bytecodes::_l2i:
    __ uxtw(r0, r0);
    break;
  case Bytecodes::_l2f:
    __ scvtfs(v0, r0);
    break;
  case Bytecodes::_l2d:
    __ scvtfd(v0, r0);
    break;
  case Bytecodes::_f2i:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzsw(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzs(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_f2d:
    __ fcvts(v0, v0);
    break;
  case Bytecodes::_d2i:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzdw(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L_Okay;
    __ clear_fpsr();
    __ fcvtzd(r0, v0);
    __ get_fpsr(r1);
    __ cbzw(r1, L_Okay);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    __ bind(L_Okay);
  }
    break;
  case Bytecodes::_d2f:
    __ fcvtd(v0, v0);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::lcmp()
{
  transition(ltos, itos);
  Label done;
  __ pop_l(r1);
  __ cmp(r1, r0);
  __ mov(r0, (uint64_t)-1L);
  __ br(Assembler::LT, done);
  // __ mov(r0, 1UL);
  // __ csel(r0, r0, zr, Assembler::NE);
  // and here is a faster way
  __ csinc(r0, zr, zr, Assembler::EQ);
  __ bind(done);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result)
{
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(v1);
    __ fcmps(v1, v0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(v1);
    __ fcmpd(v1, v0);
  }
  if (unordered_result < 0) {
    // we want -1 for unordered or less than, 0 for equal and 1 for
    // greater than.
    __ mov(r0, (uint64_t)-1L);
    // for FP LT tests less than or unordered
    __ br(Assembler::LT, done);
    // install 0 for EQ otherwise 1
    __ csinc(r0, zr, zr, Assembler::EQ);
  } else {
    // we want -1 for less than, 0 for equal and 1 for unordered or
    // greater than.
    __ mov(r0, 1L);
    // for FP HI tests greater than or unordered
    __ br(Assembler::HI, done);
    // install 0 for EQ otherwise ~0
    __ csinv(r0, zr, zr, Assembler::EQ);
  }
  __ bind(done);
}

void TemplateTable::branch(bool is_jsr, bool is_wide)
{
  __ profile_taken_branch(r0, r1);
  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // load branch displacement
  if (!is_wide) {
    __ ldrh(r2, at_bcp(1));
    __ rev16(r2, r2);
    // sign extend the 16 bit value in r2
    __ sbfm(r2, r2, 0, 15);
  } else {
    __ ldrw(r2, at_bcp(1));
    __ revw(r2, r2);
    // sign extend the 32 bit value in r2
    __ sbfm(r2, r2, 0, 31);
  }
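
  // Branch displacements, like all bytecode operands, are big-endian,
  // hence the byte reversal before the sbfm sign-extension of bits 0..15
  // (or 0..31 for the wide form).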
1769 
1770   // Handle all the JSR stuff here, then exit.
1771   // It's much shorter and cleaner than intermingling with the non-JSR
1772   // normal-branch stuff occurring below.
1773 
1774   if (is_jsr) {
1775     // Pre-load the next target bytecode into rscratch1
1776     __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
1777     // compute return address as bci
1778     __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
1779     __ add(rscratch2, rscratch2,
1780            in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
1781     __ sub(r1, rbcp, rscratch2);
1782     __ push_i(r1);
1783     // Adjust the bcp by the 16-bit displacement in r2
1784     __ add(rbcp, rbcp, r2);
1785     __ dispatch_only(vtos, /*generate_poll*/true);
1786     return;
1787   }
1788 
1789   // Normal (non-jsr) branch handling
1790 
1791   // Adjust the bcp by the displacement in r2
1792   __ add(rbcp, rbcp, r2);
1793 
1794   assert(UseLoopCounter || !UseOnStackReplacement,
1795          "on-stack-replacement requires loop counters");
1796   Label backedge_counter_overflow;
1797   Label dispatch;
1798   if (UseLoopCounter) {
1799     // increment backedge counter for backward branches
1800     // r0: MDO
1801     // w1: MDO bumped taken-count
1802     // r2: target offset
1803     __ cmp(r2, zr);
1804     __ br(Assembler::GT, dispatch); // count only if backward branch
1805 
1806     // ECN: FIXME: This code smells
1807     // check if MethodCounters exists
1808     Label has_counters;
1809     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1810     __ cbnz(rscratch1, has_counters);
1811     __ push(r0);
1812     __ push(r1);
1813     __ push(r2);
1814     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
1815             InterpreterRuntime::build_method_counters), rmethod);
1816     __ pop(r2);
1817     __ pop(r1);
1818     __ pop(r0);
1819     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1820     __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
1821     __ bind(has_counters);
1822 
1823     Label no_mdo;
1824     int increment = InvocationCounter::count_increment;
1825     if (ProfileInterpreter) {
1826       // Are we profiling?
1827       __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1828       __ cbz(r1, no_mdo);
1829       // Increment the MDO backedge counter
1830       const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
1831                                          in_bytes(InvocationCounter::counter_offset()));
1832       const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
1833       __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1834                                  r0, rscratch1, false, Assembler::EQ,
1835                                  UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1836       __ b(dispatch);
1837     }
1838     __ bind(no_mdo);
1839     // Increment backedge counter in MethodCounters*
1840     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1841     const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
1842     __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
1843                                r0, rscratch2, false, Assembler::EQ,
1844                                UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
1845     __ bind(dispatch);
1846   }
1847 
1848   // Pre-load the next target bytecode into rscratch1
1849   __ load_unsigned_byte(rscratch1, Address(rbcp, 0));
1850 
1851   // continue with the bytecode @ target
1852   // rscratch1: target bytecode
1853   // rbcp: target bcp
1854   __ dispatch_only(vtos, /*generate_poll*/true);
1855 
1856   if (UseLoopCounter && UseOnStackReplacement) {
1857     // backedge counter overflow
1858     __ bind(backedge_counter_overflow);
1859     __ neg(r2, r2);
1860     __ add(r2, r2, rbcp);     // branch bcp
1861     // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1862     __ call_VM(noreg,
1863                CAST_FROM_FN_PTR(address,
1864                                 InterpreterRuntime::frequency_counter_overflow),
1865                r2);
1866     __ load_unsigned_byte(r1, Address(rbcp, 0));  // restore target bytecode
1867 
1868     // r0: osr nmethod (osr ok) or NULL (osr not possible)
1869     // w1: target bytecode
1870     // r2: scratch
1871     __ cbz(r0, dispatch);     // test result -- no osr if null
1872     // nmethod may have been invalidated (VM may block upon call_VM return)
1873     __ ldrb(r2, Address(r0, nmethod::state_offset()));
1874     if (nmethod::in_use != 0)
1875       __ sub(r2, r2, nmethod::in_use);
1876     __ cbnz(r2, dispatch);
1877 
1878     // We have the address of an on stack replacement routine in r0
1879     // We need to prepare to execute the OSR method. First we must
1880     // migrate the locals and monitors off of the stack.
1881 
1882     __ mov(r19, r0);                             // save the nmethod
1883 
1884     call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1885 
1886     // r0 is OSR buffer, move it to expected parameter location
1887     __ mov(j_rarg0, r0);
1888 
1889     // remove activation
1890     // get sender esp
1891     __ ldr(esp,
1892         Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
1893     // remove frame anchor
1894     __ leave();
1895     // Ensure compiled code always sees stack at proper alignment
1896     __ andr(sp, esp, -16);
1897 
1898     // and begin the OSR nmethod
1899     __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
1900     __ br(rscratch1);
1901   }
1902 }
1903 
1904 
1905 void TemplateTable::if_0cmp(Condition cc)
1906 {
1907   transition(itos, vtos);
1908   // assume branch is more often taken than not (loops use backward branches)
1909   Label not_taken;
1910   if (cc == equal)
1911     __ cbnzw(r0, not_taken);
1912   else if (cc == not_equal)
1913     __ cbzw(r0, not_taken);
1914   else {
1915     __ andsw(zr, r0, r0);
1916     __ br(j_not(cc), not_taken);
1917   }
1918 
1919   branch(false, false);
1920   __ bind(not_taken);
1921   __ profile_not_taken_branch(r0);
1922 }
1923 
1924 void TemplateTable::if_icmp(Condition cc)
1925 {
1926   transition(itos, vtos);
1927   // assume branch is more often taken than not (loops use backward branches)
1928   Label not_taken;
1929   __ pop_i(r1);
1930   __ cmpw(r1, r0, Assembler::LSL);
1931   __ br(j_not(cc), not_taken);
1932   branch(false, false);
1933   __ bind(not_taken);
1934   __ profile_not_taken_branch(r0);
1935 }
1936 
1937 void TemplateTable::if_nullcmp(Condition cc)
1938 {
1939   transition(atos, vtos);
1940   // assume branch is more often taken than not (loops use backward branches)
1941   Label not_taken;
1942   if (cc == equal)
1943     __ cbnz(r0, not_taken);
1944   else
1945     __ cbz(r0, not_taken);
1946   branch(false, false);
1947   __ bind(not_taken);
1948   __ profile_not_taken_branch(r0);
1949 }
1950 
1951 void TemplateTable::if_acmp(Condition cc)
1952 {
1953   transition(atos, vtos);
1954   // assume branch is more often taken than not (loops use backward branches)
1955   Label not_taken;
1956   __ pop_ptr(r1);
1957   __ cmpoop(r1, r0);
1958   __ br(j_not(cc), not_taken);
1959   branch(false, false);
1960   __ bind(not_taken);
1961   __ profile_not_taken_branch(r0);
1962 }
1963 
1964 void TemplateTable::ret() {
1965   transition(vtos, vtos);
1966   locals_index(r1);
1967   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
1968   __ profile_ret(r1, r2);
1969   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
1970   __ lea(rbcp, Address(rbcp, r1));
1971   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
1972   __ dispatch_next(vtos, 0, /*generate_poll*/true);
1973 }
1974 
1975 void TemplateTable::wide_ret() {
1976   transition(vtos, vtos);
1977   locals_index_wide(r1);
1978   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
1979   __ profile_ret(r1, r2);
1980   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
1981   __ lea(rbcp, Address(rbcp, r1));
1982   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
1983   __ dispatch_next(vtos, 0, /*generate_poll*/true);
1984 }
1985 
1986 
1987 void TemplateTable::tableswitch() {
1988   Label default_case, continue_execution;
1989   transition(itos, vtos);
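       // A tableswitch's operands start at the next 4-byte boundary
       // after the opcode: a default offset, then the lo and hi bounds,
       // then (hi - lo + 1) 32-bit jump offsets, all stored big-endian
       // -- hence the alignment and the rev32s below.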
1990   // align rbcp
1991   __ lea(r1, at_bcp(BytesPerInt));
1992   __ andr(r1, r1, -BytesPerInt);
1993   // load lo & hi
1994   __ ldrw(r2, Address(r1, BytesPerInt));
1995   __ ldrw(r3, Address(r1, 2 * BytesPerInt));
1996   __ rev32(r2, r2);
1997   __ rev32(r3, r3);
1998   // check against lo & hi
1999   __ cmpw(r0, r2);
2000   __ br(Assembler::LT, default_case);
2001   __ cmpw(r0, r3);
2002   __ br(Assembler::GT, default_case);
2003   // lookup dispatch offset
2004   __ subw(r0, r0, r2);
2005   __ lea(r3, Address(r1, r0, Address::uxtw(2)));
2006   __ ldrw(r3, Address(r3, 3 * BytesPerInt));
2007   __ profile_switch_case(r0, r1, r2);
2008   // continue execution
2009   __ bind(continue_execution);
2010   __ rev32(r3, r3);
2011   __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
2012   __ add(rbcp, rbcp, r3, ext::sxtw);
2013   __ dispatch_only(vtos, /*generate_poll*/true);
2014   // handle default
2015   __ bind(default_case);
2016   __ profile_switch_default(r0);
2017   __ ldrw(r3, Address(r1, 0));
2018   __ b(continue_execution);
2019 }
2020 
2021 void TemplateTable::lookupswitch() {
2022   transition(itos, itos);
2023   __ stop("lookupswitch bytecode should have been rewritten");
2024 }
2025 
2026 void TemplateTable::fast_linearswitch() {
2027   transition(itos, vtos);
2028   Label loop_entry, loop, found, continue_execution;
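       // A lookupswitch's operands, 4-byte aligned, are a big-endian
       // default offset, a pair count, and then that many (match, offset)
       // pairs of 2 * BytesPerInt each -- hence the lsl(3) scaling of the
       // loop index and the 2 * BytesPerInt displacement to reach a
       // match word.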
2029   // bswap r0 so we can avoid bswapping the table entries
2030   __ rev32(r0, r0);
2031   // align rbcp
2032   __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2033                                     // this instruction (change offsets
2034                                     // below)
2035   __ andr(r19, r19, -BytesPerInt);
2036   // set counter
2037   __ ldrw(r1, Address(r19, BytesPerInt));
2038   __ rev32(r1, r1);
2039   __ b(loop_entry);
2040   // table search
2041   __ bind(loop);
2042   __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2043   __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
2044   __ cmpw(r0, rscratch1);
2045   __ br(Assembler::EQ, found);
2046   __ bind(loop_entry);
2047   __ subs(r1, r1, 1);
2048   __ br(Assembler::PL, loop);
2049   // default case
2050   __ profile_switch_default(r0);
2051   __ ldrw(r3, Address(r19, 0));
2052   __ b(continue_execution);
2053   // entry found -> get offset
2054   __ bind(found);
2055   __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2056   __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
2057   __ profile_switch_case(r1, r0, r19);
2058   // continue execution
2059   __ bind(continue_execution);
2060   __ rev32(r3, r3);
2061   __ add(rbcp, rbcp, r3, ext::sxtw);
2062   __ ldrb(rscratch1, Address(rbcp, 0));
2063   __ dispatch_only(vtos, /*generate_poll*/true);
2064 }
2065 
2066 void TemplateTable::fast_binaryswitch() {
2067   transition(itos, vtos);
2068   // Implementation using the following core algorithm:
2069   //
2070   // int binary_search(int key, LookupswitchPair* array, int n) {
2071   //   // Binary search according to "Methodik des Programmierens" by
2072   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2073   //   int i = 0;
2074   //   int j = n;
2075   //   while (i+1 < j) {
2076   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2077   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2078   //     // where a stands for the array and assuming that the (nonexistent)
2079   //     // element a[n] is infinitely big.
2080   //     int h = (i + j) >> 1;
2081   //     // i < h < j
2082   //     if (key < array[h].fast_match()) {
2083   //       j = h;
2084   //     } else {
2085   //       i = h;
2086   //     }
2087   //   }
2088   //   // R: a[i] <= key < a[i+1] or Q
2089   //   // (i.e., if key is within array, i is the correct index)
2090   //   return i;
2091   // }
2092 
2093   // Register allocation
2094   const Register key   = r0; // already set (tosca)
2095   const Register array = r1;
2096   const Register i     = r2;
2097   const Register j     = r3;
2098   const Register h     = rscratch1;
2099   const Register temp  = rscratch2;
2100 
2101   // Find array start
2102   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2103                                           // get rid of this
2104                                           // instruction (change
2105                                           // offsets below)
2106   __ andr(array, array, -BytesPerInt);
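       // After alignment, array points at the first (match, offset) pair;
       // the pair count sits at array - BytesPerInt and the big-endian
       // default offset at array - 2 * BytesPerInt (see default_case below).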
2107 
2108   // Initialize i & j
2109   __ mov(i, 0);                            // i = 0;
2110   __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);
2111 
2112   // Convert j into native byte ordering
2113   __ rev32(j, j);
2114 
2115   // And start
2116   Label entry;
2117   __ b(entry);
2118 
2119   // binary search loop
2120   {
2121     Label loop;
2122     __ bind(loop);
2123     // int h = (i + j) >> 1;
2124     __ addw(h, i, j);                                 // h = i + j;
2125     __ lsrw(h, h, 1);                                 // h = (i + j) >> 1;
2126     // if (key < array[h].fast_match()) {
2127     //   j = h;
2128     // } else {
2129     //   i = h;
2130     // }
2131     // Convert array[h].match to native byte-ordering before compare
2132     __ ldr(temp, Address(array, h, Address::lsl(3)));
2133     __ rev32(temp, temp);
2134     __ cmpw(key, temp);
2135     // j = h if (key <  array[h].fast_match())
2136     __ csel(j, h, j, Assembler::LT);
2137     // i = h if (key >= array[h].fast_match())
2138     __ csel(i, h, i, Assembler::GE);
2139     // while (i+1 < j)
2140     __ bind(entry);
2141     __ addw(h, i, 1);          // i+1
2142     __ cmpw(h, j);             // i+1 < j
2143     __ br(Assembler::LT, loop);
2144   }
2145 
2146   // end of binary search, result index is i (must check again!)
2147   Label default_case;
2148   // Convert array[i].match to native byte-ordering before compare
2149   __ ldr(temp, Address(array, i, Address::lsl(3)));
2150   __ rev32(temp, temp);
2151   __ cmpw(key, temp);
2152   __ br(Assembler::NE, default_case);
2153 
2154   // entry found -> j = offset
2155   __ add(j, array, i, ext::uxtx, 3);
2156   __ ldrw(j, Address(j, BytesPerInt));
2157   __ profile_switch_case(i, key, array);
2158   __ rev32(j, j);
2159   __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2160   __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2161   __ dispatch_only(vtos, /*generate_poll*/true);
2162 
2163   // default case -> j = default offset
2164   __ bind(default_case);
2165   __ profile_switch_default(i);
2166   __ ldrw(j, Address(array, -2 * BytesPerInt));
2167   __ rev32(j, j);
2168   __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2169   __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2170   __ dispatch_only(vtos, /*generate_poll*/true);
2171 }
2172 
2173 
2174 void TemplateTable::_return(TosState state)
2175 {
2176   transition(state, state);
2177   assert(_desc->calls_vm(),
2178          "inconsistent calls_vm information"); // call in remove_activation
2179 
2180   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2181     assert(state == vtos, "only valid state");
2182 
2183     __ ldr(c_rarg1, aaddress(0));
2184     __ load_klass(r3, c_rarg1);
2185     __ ldrw(r3, Address(r3, Klass::access_flags_offset()));
2186     Label skip_register_finalizer;
2187     __ tbz(r3, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2188 
2189     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2190 
2191     __ bind(skip_register_finalizer);
2192   }
2193 
2194   // Issue a StoreStore barrier after all stores but before return
2195   // from any constructor for any class with a final field.  We can't
2196   // tell whether this return is from such a constructor, so we always do so.
2197   if (_desc->bytecode() == Bytecodes::_return)
2198     __ membar(MacroAssembler::StoreStore);
2199 
2200   // Narrow result if state is itos but result type is smaller.
2201   // Need to narrow in the return bytecode rather than in generate_return_entry
2202   // since compiled code callers expect the result to already be narrowed.
2203   if (state == itos) {
2204     __ narrow(r0);
2205   }
2206 
2207   __ remove_activation(state);
2208   __ ret(lr);
2209 }
2210 
2211 // ----------------------------------------------------------------------------
2212 // Volatile variables demand their effects be made known to all CPUs
2213 // in order.  Store buffers on most chips allow reads & writes to
2214 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2215 // without some kind of memory barrier (i.e., it's not sufficient that
2216 // the interpreter does not reorder volatile references, the hardware
2217 // also must not reorder them).
2218 //
2219 // According to the new Java Memory Model (JMM):
2220 // (1) All volatiles are serialized with respect to each other.  ALSO reads &
2221 //     writes act as acquire & release, so:
2222 // (2) A read cannot let unrelated NON-volatile memory refs that
2223 //     happen after the read float up to before the read.  It's OK for
2224 //     non-volatile memory refs that happen before the volatile read to
2225 //     float down below it.
2226 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2227 //     memory refs that happen BEFORE the write float down to after the
2228 //     write.  It's OK for non-volatile memory refs that happen after the
2229 //     volatile write to float up before it.
2230 //
2231 // We only put in barriers around volatile refs (they are expensive),
2232 // not _between_ memory refs (that would require us to track the
2233 // flavor of the previous memory refs).  Requirements (2) and (3)
2234 // require some barriers before volatile stores and after volatile
2235 // loads.  These nearly cover requirement (1) but miss the
2236 // volatile-store-volatile-load case.  This final case is placed after
2237 // volatile-stores although it could just as well go before
2238 // volatile-loads.
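     //
     // As a sketch, the patterns generated below for a volatile field
     // access are:
     //
     //   volatile load:   membar AnyAny          (only when compiled code
     //                    <load>                  may be using LDAR/STLR)
     //                    membar LoadLoad|LoadStore
     //
     //   volatile store:  membar StoreStore|LoadStore
     //                    <store>
     //                    membar StoreLoad|StoreStore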
2239 
2240 void TemplateTable::resolve_cache_and_index(int byte_no,
2241                                             Register Rcache,
2242                                             Register index,
2243                                             size_t index_size) {
2244   const Register temp = r19;
2245   assert_different_registers(Rcache, index, temp);
2246 
2247   Label resolved, clinit_barrier_slow;
2248 
2249   Bytecodes::Code code = bytecode();
2250   switch (code) {
2251   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2252   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2253   default: break;
2254   }
2255 
2256   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2257   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2258   __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
2259   __ br(Assembler::EQ, resolved);
2260 
2261   // resolve first time through
2262   // Class initialization barrier slow path lands here as well.
2263   __ bind(clinit_barrier_slow);
2264   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2265   __ mov(temp, (int) code);
2266   __ call_VM(noreg, entry, temp);
2267 
2268   // Update registers with resolved info
2269   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2270   // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
2271   // so all clients of this method must be modified accordingly
2272   __ bind(resolved);
2273 
2274   // Class initialization barrier for static methods
2275   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2276     __ load_resolved_method_at_index(byte_no, temp, Rcache);
2277     __ load_method_holder(temp, temp);
2278     __ clinit_barrier(temp, rscratch1, NULL, &clinit_barrier_slow);
2279   }
2280 }
2281 
2282 // The Rcache and index registers must be set before call
2283 // n.b. unlike x86, the cache already includes the index offset
2284 void TemplateTable::load_field_cp_cache_entry(Register obj,
2285                                               Register cache,
2286                                               Register index,
2287                                               Register off,
2288                                               Register flags,
2289                                               bool is_static = false) {
2290   assert_different_registers(cache, index, flags, off);
2291 
2292   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2293   // Field offset
2294   __ ldr(off, Address(cache, in_bytes(cp_base_offset +
2295                                           ConstantPoolCacheEntry::f2_offset())));
2296   // Flags
2297   __ ldrw(flags, Address(cache, in_bytes(cp_base_offset +
2298                                            ConstantPoolCacheEntry::flags_offset())));
2299 
2300   // klass overwrite register
2301   if (is_static) {
2302     __ ldr(obj, Address(cache, in_bytes(cp_base_offset +
2303                                         ConstantPoolCacheEntry::f1_offset())));
2304     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2305     __ ldr(obj, Address(obj, mirror_offset));
2306     __ resolve_oop_handle(obj);
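         // f1 held the field holder's Klass*; its java_mirror is kept
         // indirectly as an OopHandle, so we load the handle and then
         // resolve (dereference) it to get the mirror oop.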
2307   }
2308 }
2309 
2310 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2311                                                Register method,
2312                                                Register itable_index,
2313                                                Register flags,
2314                                                bool is_invokevirtual,
2315                                                bool is_invokevfinal, /*unused*/
2316                                                bool is_invokedynamic) {
2317   // setup registers
2318   const Register cache = rscratch2;
2319   const Register index = r4;
2320   assert_different_registers(method, flags);
2321   assert_different_registers(method, cache, index);
2322   assert_different_registers(itable_index, flags);
2323   assert_different_registers(itable_index, cache, index);
2324   // determine constant pool cache field offsets
2325   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2326   const int method_offset = in_bytes(
2327     ConstantPoolCache::base_offset() +
2328       (is_invokevirtual
2329        ? ConstantPoolCacheEntry::f2_offset()
2330        : ConstantPoolCacheEntry::f1_offset()));
2331   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2332                                     ConstantPoolCacheEntry::flags_offset());
2333   // access constant pool cache fields
2334   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2335                                     ConstantPoolCacheEntry::f2_offset());
2336 
2337   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2338   resolve_cache_and_index(byte_no, cache, index, index_size);
2339   __ ldr(method, Address(cache, method_offset));
2340 
2341   if (itable_index != noreg) {
2342     __ ldr(itable_index, Address(cache, index_offset));
2343   }
2344   __ ldrw(flags, Address(cache, flags_offset));
2345 }
2346 
2347 
2348 // The cache and index registers are expected to be set before the call.
2349 // Correct values of the cache and index registers are preserved.
2350 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2351                                             bool is_static, bool has_tos) {
2352   // do the JVMTI work here to avoid disturbing the register state below
2353   // We use c_rarg registers here because we want to use the register used in
2354   // the call to the VM
2355   if (JvmtiExport::can_post_field_access()) {
2356     // Check to see if a field access watch has been set before we
2357     // take the time to call into the VM.
2358     Label L1;
2359     assert_different_registers(cache, index, r0);
2360     __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2361     __ ldrw(r0, Address(rscratch1));
2362     __ cbzw(r0, L1);
2363 
2364     __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2365     __ lea(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset())));
2366 
2367     if (is_static) {
2368       __ mov(c_rarg1, zr); // NULL object reference
2369     } else {
2370       __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
2371       __ verify_oop(c_rarg1);
2372     }
2373     // c_rarg1: object pointer or NULL
2374     // c_rarg2: cache entry pointer
2375     // c_rarg3: jvalue object on the stack
2376     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2377                                        InterpreterRuntime::post_field_access),
2378                c_rarg1, c_rarg2, c_rarg3);
2379     __ get_cache_and_index_at_bcp(cache, index, 1);
2380     __ bind(L1);
2381   }
2382 }
2383 
2384 void TemplateTable::pop_and_check_object(Register r)
2385 {
2386   __ pop_ptr(r);
2387   __ null_check(r);  // for field access must check obj.
2388   __ verify_oop(r);
2389 }
2390 
2391 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2392 {
2393   const Register cache = r2;
2394   const Register index = r3;
2395   const Register obj   = r4;
2396   const Register off   = r19;
2397   const Register flags = r0;
2398   const Register raw_flags = r6;
2399   const Register bc    = r4; // uses same reg as obj, so don't mix them
2400 
2401   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2402   jvmti_post_field_access(cache, index, is_static, false);
2403   load_field_cp_cache_entry(obj, cache, index, off, raw_flags, is_static);
2404 
2405   if (!is_static) {
2406     // obj is on the stack
2407     pop_and_check_object(obj);
2408   }
2409 
2410   // 8179954: We need to make sure that the code generated for
2411   // volatile accesses forms a sequentially-consistent set of
2412   // operations when combined with STLR and LDAR.  Without a leading
2413   // membar it's possible for a simple Dekker test to fail if loads
2414   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
2415   // the stores in one method and we interpret the loads in another.
2416   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()){
2417     Label notVolatile;
2418     __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2419     __ membar(MacroAssembler::AnyAny);
2420     __ bind(notVolatile);
2421   }
2422 
2423   const Address field(obj, off);
2424 
2425   Label Done, notByte, notBool, notInt, notShort, notChar,
2426               notLong, notFloat, notObj, notDouble;
2427 
2428   // x86 uses a shift and mask, or wings it with a shift plus an assert
2429   // that the mask is not needed; aarch64 just uses a bitfield extract
2430   __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift,
2431            ConstantPoolCacheEntry::tos_state_bits);
2432 
2433   assert(btos == 0, "change code, btos != 0");
2434   __ cbnz(flags, notByte);
2435 
2436   // Don't rewrite getstatic, only getfield
2437   if (is_static) rc = may_not_rewrite;
2438 
2439   // btos
2440   __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
2441   __ push(btos);
2442   // Rewrite bytecode to be faster
2443   if (rc == may_rewrite) {
2444     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2445   }
2446   __ b(Done);
2447 
2448   __ bind(notByte);
2449   __ cmp(flags, (u1)ztos);
2450   __ br(Assembler::NE, notBool);
2451 
2452   // ztos (same code as btos)
2453   __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
2454   __ push(ztos);
2455   // Rewrite bytecode to be faster
2456   if (rc == may_rewrite) {
2457     // use btos rewriting, no truncating to t/f bit is needed for getfield.
2458     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2459   }
2460   __ b(Done);
2461 
2462   __ bind(notBool);
2463   __ cmp(flags, (u1)atos);
2464   __ br(Assembler::NE, notObj);
2465   // atos
2466   do_oop_load(_masm, field, r0, IN_HEAP);
2467   __ push(atos);
2468   if (rc == may_rewrite) {
2469     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2470   }
2471   __ b(Done);
2472 
2473   __ bind(notObj);
2474   __ cmp(flags, (u1)itos);
2475   __ br(Assembler::NE, notInt);
2476   // itos
2477   __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
2478   __ push(itos);
2479   // Rewrite bytecode to be faster
2480   if (rc == may_rewrite) {
2481     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2482   }
2483   __ b(Done);
2484 
2485   __ bind(notInt);
2486   __ cmp(flags, (u1)ctos);
2487   __ br(Assembler::NE, notChar);
2488   // ctos
2489   __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
2490   __ push(ctos);
2491   // Rewrite bytecode to be faster
2492   if (rc == may_rewrite) {
2493     patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
2494   }
2495   __ b(Done);
2496 
2497   __ bind(notChar);
2498   __ cmp(flags, (u1)stos);
2499   __ br(Assembler::NE, notShort);
2500   // stos
2501   __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
2502   __ push(stos);
2503   // Rewrite bytecode to be faster
2504   if (rc == may_rewrite) {
2505     patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
2506   }
2507   __ b(Done);
2508 
2509   __ bind(notShort);
2510   __ cmp(flags, (u1)ltos);
2511   __ br(Assembler::NE, notLong);
2512   // ltos
2513   __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
2514   __ push(ltos);
2515   // Rewrite bytecode to be faster
2516   if (rc == may_rewrite) {
2517     patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
2518   }
2519   __ b(Done);
2520 
2521   __ bind(notLong);
2522   __ cmp(flags, (u1)ftos);
2523   __ br(Assembler::NE, notFloat);
2524   // ftos
2525   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2526   __ push(ftos);
2527   // Rewrite bytecode to be faster
2528   if (rc == may_rewrite) {
2529     patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
2530   }
2531   __ b(Done);
2532 
2533   __ bind(notFloat);
2534 #ifdef ASSERT
2535   __ cmp(flags, (u1)dtos);
2536   __ br(Assembler::NE, notDouble);
2537 #endif
2538   // dtos
2539   __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
2540   __ push(dtos);
2541   // Rewrite bytecode to be faster
2542   if (rc == may_rewrite) {
2543     patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
2544   }
2545 #ifdef ASSERT
2546   __ b(Done);
2547 
2548   __ bind(notDouble);
2549   __ stop("Bad state");
2550 #endif
2551 
2552   __ bind(Done);
2553 
2554   Label notVolatile;
2555   __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2556   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2557   __ bind(notVolatile);
2558 }
2559 
2560 
2561 void TemplateTable::getfield(int byte_no)
2562 {
2563   getfield_or_static(byte_no, false);
2564 }
2565 
2566 void TemplateTable::nofast_getfield(int byte_no) {
2567   getfield_or_static(byte_no, false, may_not_rewrite);
2568 }
2569 
2570 void TemplateTable::getstatic(int byte_no)
2571 {
2572   getfield_or_static(byte_no, true);
2573 }
2574 
2575 // The cache and index registers are expected to be set before the call.
2576 // The function may destroy various registers, just not the cache and index registers.
2577 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2578   transition(vtos, vtos);
2579 
2580   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2581 
2582   if (JvmtiExport::can_post_field_modification()) {
2583     // Check to see if a field modification watch has been set before
2584     // we take the time to call into the VM.
2585     Label L1;
2586     assert_different_registers(cache, index, r0);
2587     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2588     __ ldrw(r0, Address(rscratch1));
2589     __ cbz(r0, L1);
2590 
2591     __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2592 
2593     if (is_static) {
2594       // Life is simple.  Null out the object pointer.
2595       __ mov(c_rarg1, zr);
2596     } else {
2597       // Life is harder. The stack holds the value on top, followed by
2598       // the object.  We don't know the size of the value, though; it
2599       // could be one or two words depending on its type. As a result,
2600       // we must find the type to determine where the object is.
2601       __ ldrw(c_rarg3, Address(c_rarg2,
2602                                in_bytes(cp_base_offset +
2603                                         ConstantPoolCacheEntry::flags_offset())));
2604       __ lsr(c_rarg3, c_rarg3,
2605              ConstantPoolCacheEntry::tos_state_shift);
2606       ConstantPoolCacheEntry::verify_tos_state_shift();
2607       Label nope2, ok;
2608       __ ldr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
2609       __ cmpw(c_rarg3, ltos);
2610       __ br(Assembler::EQ, ok);
2611       __ cmpw(c_rarg3, dtos);
2612       __ br(Assembler::NE, nope2);
2613       __ bind(ok);
2614       __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2615       __ bind(nope2);
2616     }
2617     // cache entry pointer
2618     __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
2619     // object (tos)
2620     __ mov(c_rarg3, esp);
2621     // c_rarg1: object pointer set up above (NULL if static)
2622     // c_rarg2: cache entry pointer
2623     // c_rarg3: jvalue object on the stack
2624     __ call_VM(noreg,
2625                CAST_FROM_FN_PTR(address,
2626                                 InterpreterRuntime::post_field_modification),
2627                c_rarg1, c_rarg2, c_rarg3);
2628     __ get_cache_and_index_at_bcp(cache, index, 1);
2629     __ bind(L1);
2630   }
2631 }
2632 
2633 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2634   transition(vtos, vtos);
2635 
2636   const Register cache = r2;
2637   const Register index = r3;
2638   const Register obj   = r2;
2639   const Register off   = r19;
2640   const Register flags = r0;
2641   const Register bc    = r4;
2642 
2643   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2644   jvmti_post_field_mod(cache, index, is_static);
2645   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2646 
2647   Label Done;
2648   __ mov(r5, flags);
2649 
2650   {
2651     Label notVolatile;
2652     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2653     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2654     __ bind(notVolatile);
2655   }
2656 
2657   // field address
2658   const Address field(obj, off);
2659 
2660   Label notByte, notBool, notInt, notShort, notChar,
2661         notLong, notFloat, notObj, notDouble;
2662 
2663   // x86 uses a shift and mask, or wings it with a shift plus an assert
2664   // that the mask is not needed; aarch64 just uses a bitfield extract
2665   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2666 
2667   assert(btos == 0, "change code, btos != 0");
2668   __ cbnz(flags, notByte);
2669 
2670   // Don't rewrite putstatic, only putfield
2671   if (is_static) rc = may_not_rewrite;
2672 
2673   // btos
2674   {
2675     __ pop(btos);
2676     if (!is_static) pop_and_check_object(obj);
2677     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
2678     if (rc == may_rewrite) {
2679       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2680     }
2681     __ b(Done);
2682   }
2683 
2684   __ bind(notByte);
2685   __ cmp(flags, (u1)ztos);
2686   __ br(Assembler::NE, notBool);
2687 
2688   // ztos
2689   {
2690     __ pop(ztos);
2691     if (!is_static) pop_and_check_object(obj);
2692     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
2693     if (rc == may_rewrite) {
2694       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2695     }
2696     __ b(Done);
2697   }
2698 
2699   __ bind(notBool);
2700   __ cmp(flags, (u1)atos);
2701   __ br(Assembler::NE, notObj);
2702 
2703   // atos
2704   {
2705     __ pop(atos);
2706     if (!is_static) pop_and_check_object(obj);
2707     // Store into the field
2708     do_oop_store(_masm, field, r0, IN_HEAP);
2709     if (rc == may_rewrite) {
2710       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2711     }
2712     __ b(Done);
2713   }
2714 
2715   __ bind(notObj);
2716   __ cmp(flags, (u1)itos);
2717   __ br(Assembler::NE, notInt);
2718 
2719   // itos
2720   {
2721     __ pop(itos);
2722     if (!is_static) pop_and_check_object(obj);
2723     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
2724     if (rc == may_rewrite) {
2725       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2726     }
2727     __ b(Done);
2728   }
2729 
2730   __ bind(notInt);
2731   __ cmp(flags, (u1)ctos);
2732   __ br(Assembler::NE, notChar);
2733 
2734   // ctos
2735   {
2736     __ pop(ctos);
2737     if (!is_static) pop_and_check_object(obj);
2738     __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
2739     if (rc == may_rewrite) {
2740       patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
2741     }
2742     __ b(Done);
2743   }
2744 
2745   __ bind(notChar);
2746   __ cmp(flags, (u1)stos);
2747   __ br(Assembler::NE, notShort);
2748 
2749   // stos
2750   {
2751     __ pop(stos);
2752     if (!is_static) pop_and_check_object(obj);
2753     __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
2754     if (rc == may_rewrite) {
2755       patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
2756     }
2757     __ b(Done);
2758   }
2759 
2760   __ bind(notShort);
2761   __ cmp(flags, (u1)ltos);
2762   __ br(Assembler::NE, notLong);
2763 
2764   // ltos
2765   {
2766     __ pop(ltos);
2767     if (!is_static) pop_and_check_object(obj);
2768     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
2769     if (rc == may_rewrite) {
2770       patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
2771     }
2772     __ b(Done);
2773   }
2774 
2775   __ bind(notLong);
2776   __ cmp(flags, (u1)ftos);
2777   __ br(Assembler::NE, notFloat);
2778 
2779   // ftos
2780   {
2781     __ pop(ftos);
2782     if (!is_static) pop_and_check_object(obj);
2783     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
2784     if (rc == may_rewrite) {
2785       patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
2786     }
2787     __ b(Done);
2788   }
2789 
2790   __ bind(notFloat);
2791 #ifdef ASSERT
2792   __ cmp(flags, (u1)dtos);
2793   __ br(Assembler::NE, notDouble);
2794 #endif
2795 
2796   // dtos
2797   {
2798     __ pop(dtos);
2799     if (!is_static) pop_and_check_object(obj);
2800     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
2801     if (rc == may_rewrite) {
2802       patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
2803     }
2804   }
2805 
2806 #ifdef ASSERT
2807   __ b(Done);
2808 
2809   __ bind(notDouble);
2810   __ stop("Bad state");
2811 #endif
2812 
2813   __ bind(Done);
2814 
2815   {
2816     Label notVolatile;
2817     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2818     __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
2819     __ bind(notVolatile);
2820   }
2821 }
2822 
2823 void TemplateTable::putfield(int byte_no)
2824 {
2825   putfield_or_static(byte_no, false);
2826 }
2827 
2828 void TemplateTable::nofast_putfield(int byte_no) {
2829   putfield_or_static(byte_no, false, may_not_rewrite);
2830 }
2831 
2832 void TemplateTable::putstatic(int byte_no) {
2833   putfield_or_static(byte_no, true);
2834 }
2835 
2836 void TemplateTable::jvmti_post_fast_field_mod()
2837 {
2838   if (JvmtiExport::can_post_field_modification()) {
2839     // Check to see if a field modification watch has been set before
2840     // we take the time to call into the VM.
2841     Label L2;
2842     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2843     __ ldrw(c_rarg3, Address(rscratch1));
2844     __ cbzw(c_rarg3, L2);
2845     __ pop_ptr(r19);                  // copy the object pointer from tos
2846     __ verify_oop(r19);
2847     __ push_ptr(r19);                 // put the object pointer back on tos
2848     // Save tos values before call_VM() clobbers them. Since we have
2849     // to do it for every data type, we use the saved values as the
2850     // jvalue object.
2851     switch (bytecode()) {          // load values into the jvalue object
2852     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
2853     case Bytecodes::_fast_bputfield: // fall through
2854     case Bytecodes::_fast_zputfield: // fall through
2855     case Bytecodes::_fast_sputfield: // fall through
2856     case Bytecodes::_fast_cputfield: // fall through
2857     case Bytecodes::_fast_iputfield: __ push_i(r0); break;
2858     case Bytecodes::_fast_dputfield: __ push_d(); break;
2859     case Bytecodes::_fast_fputfield: __ push_f(); break;
2860     case Bytecodes::_fast_lputfield: __ push_l(r0); break;
2861 
2862     default:
2863       ShouldNotReachHere();
2864     }
2865     __ mov(c_rarg3, esp);             // points to jvalue on the stack
2866     // access constant pool cache entry
2867     __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
2868     __ verify_oop(r19);
2869     // r19: object pointer copied above
2870     // c_rarg2: cache entry pointer
2871     // c_rarg3: jvalue object on the stack
2872     __ call_VM(noreg,
2873                CAST_FROM_FN_PTR(address,
2874                                 InterpreterRuntime::post_field_modification),
2875                r19, c_rarg2, c_rarg3);
2876 
2877     switch (bytecode()) {             // restore tos values
2878     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
2879     case Bytecodes::_fast_bputfield: // fall through
2880     case Bytecodes::_fast_zputfield: // fall through
2881     case Bytecodes::_fast_sputfield: // fall through
2882     case Bytecodes::_fast_cputfield: // fall through
2883     case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
2884     case Bytecodes::_fast_dputfield: __ pop_d(); break;
2885     case Bytecodes::_fast_fputfield: __ pop_f(); break;
2886     case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
2887     default: break;
2888     }
2889     __ bind(L2);
2890   }
2891 }
2892 
2893 void TemplateTable::fast_storefield(TosState state)
2894 {
2895   transition(state, vtos);
2896 
2897   ByteSize base = ConstantPoolCache::base_offset();
2898 
2899   jvmti_post_fast_field_mod();
2900 
2901   // access constant pool cache
2902   __ get_cache_and_index_at_bcp(r2, r1, 1);
2903 
2904   // Must prevent reordering of the following cp cache loads with bytecode load
2905   __ membar(MacroAssembler::LoadLoad);
2906 
2907   // test for volatile with r3
2908   __ ldrw(r3, Address(r2, in_bytes(base +
2909                                    ConstantPoolCacheEntry::flags_offset())));
2910 
2911   // replace index with field offset from cache entry
2912   __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2913 
2914   {
2915     Label notVolatile;
2916     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2917     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2918     __ bind(notVolatile);
2919   }
2920 
2923   // Get object from stack
2924   pop_and_check_object(r2);
2925 
2926   // field address
2927   const Address field(r2, r1);
2928 
2929   // access field
2930   switch (bytecode()) {
2931   case Bytecodes::_fast_aputfield:
2932     do_oop_store(_masm, field, r0, IN_HEAP);
2933     break;
2934   case Bytecodes::_fast_lputfield:
2935     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
2936     break;
2937   case Bytecodes::_fast_iputfield:
2938     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
2939     break;
2940   case Bytecodes::_fast_zputfield:
2941     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
2942     break;
2943   case Bytecodes::_fast_bputfield:
2944     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
2945     break;
2946   case Bytecodes::_fast_sputfield:
2947     __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
2948     break;
2949   case Bytecodes::_fast_cputfield:
2950     __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
2951     break;
2952   case Bytecodes::_fast_fputfield:
2953     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
2954     break;
2955   case Bytecodes::_fast_dputfield:
2956     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
2957     break;
2958   default:
2959     ShouldNotReachHere();
2960   }
2961 
2962   {
2963     Label notVolatile;
2964     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2965     __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore);
2966     __ bind(notVolatile);
2967   }
2968 }
2969 
2970 
2971 void TemplateTable::fast_accessfield(TosState state)
2972 {
2973   transition(atos, state);
2974   // Do the JVMTI work here to avoid disturbing the register state below
2975   if (JvmtiExport::can_post_field_access()) {
2976     // Check to see if a field access watch has been set before we
2977     // take the time to call into the VM.
2978     Label L1;
2979     __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2980     __ ldrw(r2, Address(rscratch1));
2981     __ cbzw(r2, L1);
2982     // access constant pool cache entry
2983     __ get_cache_entry_pointer_at_bcp(c_rarg2, rscratch2, 1);
2984     __ verify_oop(r0);
2985     __ push_ptr(r0);  // save object pointer before call_VM() clobbers it
2986     __ mov(c_rarg1, r0);
2987     // c_rarg1: object pointer copied above
2988     // c_rarg2: cache entry pointer
2989     __ call_VM(noreg,
2990                CAST_FROM_FN_PTR(address,
2991                                 InterpreterRuntime::post_field_access),
2992                c_rarg1, c_rarg2);
2993     __ pop_ptr(r0); // restore object pointer
2994     __ bind(L1);
2995   }
2996 
2997   // access constant pool cache
2998   __ get_cache_and_index_at_bcp(r2, r1, 1);
2999 
3000   // Must prevent reordering of the following cp cache loads with bytecode load
3001   __ membar(MacroAssembler::LoadLoad);
3002 
3003   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3004                                   ConstantPoolCacheEntry::f2_offset())));
3005   __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3006                                    ConstantPoolCacheEntry::flags_offset())));
3007 
3008   // r0: object
3009   __ verify_oop(r0);
3010   __ null_check(r0);
3011   const Address field(r0, r1);
3012 
3013   // 8179954: We need to make sure that the code generated for
3014   // volatile accesses forms a sequentially-consistent set of
3015   // operations when combined with STLR and LDAR.  Without a leading
3016   // membar it's possible for a simple Dekker test to fail if loads
3017   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3018   // the stores in one method and we interpret the loads in another.
3019   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3020     Label notVolatile;
3021     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3022     __ membar(MacroAssembler::AnyAny);
3023     __ bind(notVolatile);
3024   }
3025 
3026   // access field
3027   switch (bytecode()) {
3028   case Bytecodes::_fast_agetfield:
3029     do_oop_load(_masm, field, r0, IN_HEAP);
3030     __ verify_oop(r0);
3031     break;
3032   case Bytecodes::_fast_lgetfield:
3033     __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
3034     break;
3035   case Bytecodes::_fast_igetfield:
3036     __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
3037     break;
3038   case Bytecodes::_fast_bgetfield:
3039     __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
3040     break;
3041   case Bytecodes::_fast_sgetfield:
3042     __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
3043     break;
3044   case Bytecodes::_fast_cgetfield:
3045     __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
3046     break;
3047   case Bytecodes::_fast_fgetfield:
3048     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3049     break;
3050   case Bytecodes::_fast_dgetfield:
3051     __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3052     break;
3053   default:
3054     ShouldNotReachHere();
3055   }
3056   {
3057     Label notVolatile;
3058     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3059     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3060     __ bind(notVolatile);
3061   }
3062 }
3063 
3064 void TemplateTable::fast_xaccess(TosState state)
3065 {
3066   transition(vtos, state);
3067 
3068   // get receiver
3069   __ ldr(r0, aaddress(0));
3070   // access constant pool cache
3071   __ get_cache_and_index_at_bcp(r2, r3, 2);
3072   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3073                                   ConstantPoolCacheEntry::f2_offset())));
3074 
3075   // 8179954: We need to make sure that the code generated for
3076   // volatile accesses forms a sequentially-consistent set of
3077   // operations when combined with STLR and LDAR.  Without a leading
3078   // membar it's possible for a simple Dekker test to fail if loads
3079   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3080   // the stores in one method and we interpret the loads in another.
3081   if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
3082     Label notVolatile;
3083     __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3084                                      ConstantPoolCacheEntry::flags_offset())));
3085     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3086     __ membar(MacroAssembler::AnyAny);
3087     __ bind(notVolatile);
3088   }
3089 
3090   // make sure exception is reported in correct bcp range (getfield is
3091   // next instruction)
3092   __ increment(rbcp);
3093   __ null_check(r0);
3094   switch (state) {
3095   case itos:
3096     __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3097     break;
3098   case atos:
3099     do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
3100     __ verify_oop(r0);
3101     break;
3102   case ftos:
3103     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
3104     break;
3105   default:
3106     ShouldNotReachHere();
3107   }
3108 
3109   {
3110     Label notVolatile;
3111     __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3112                                      ConstantPoolCacheEntry::flags_offset())));
3113     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3114     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
3115     __ bind(notVolatile);
3116   }
3117 
3118   __ decrement(rbcp);
3119 }
3120 
3121 
3122 
3123 //-----------------------------------------------------------------------------
3124 // Calls
3125 
3126 void TemplateTable::prepare_invoke(int byte_no,
3127                                    Register method, // linked method (or i-klass)
3128                                    Register index,  // itable index, MethodType, etc.
3129                                    Register recv,   // if caller wants to see it
3130                                    Register flags   // if caller wants to test it
3131                                    ) {
3132   // determine flags
3133   Bytecodes::Code code = bytecode();
3134   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3135   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3136   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3137   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3138   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3139   const bool load_receiver       = (recv  != noreg);
3140   const bool save_flags          = (flags != noreg);
3141   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3142   assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3143   assert(flags == noreg || flags == r3, "");
3144   assert(recv  == noreg || recv  == r2, "");
3145 
3146   // setup registers & access constant pool cache
3147   if (recv  == noreg)  recv  = r2;
3148   if (flags == noreg)  flags = r3;
3149   assert_different_registers(method, index, recv, flags);
3150 
3151   // save 'interpreter return address'
3152   __ save_bcp();
3153 
3154   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3155 
3156   // maybe push appendix to arguments (just before return address)
3157   if (is_invokedynamic || is_invokehandle) {
3158     Label L_no_push;
3159     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
3160     // Push the appendix as a trailing parameter.
3161     // This must be done before we get the receiver,
3162     // since the parameter_size includes it.
3163     __ push(r19);
3164     __ mov(r19, index);
3165     __ load_resolved_reference_at_index(index, r19);
3166     __ pop(r19);
3167     __ push(index);  // push appendix (MethodType, CallSite, etc.)
3168     __ bind(L_no_push);
3169   }
3170 
3171   // load receiver if needed (note: no return address pushed yet)
3172   if (load_receiver) {
3173     __ andw(recv, flags, ConstantPoolCacheEntry::parameter_size_mask);
3174     // FIXME -- is this actually correct? looks like it should be 2
3175     // const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
3176     // const int receiver_is_at_end      = -1;  // back off one slot to get receiver
3177     // Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3178     // __ movptr(recv, recv_addr);
3179     __ add(rscratch1, esp, recv, ext::uxtx, 3); // FIXME: uxtb here?
3180     __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
3181     __ verify_oop(recv);
3182   }
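       // The receiver sits parameter_size - 1 slots below the top of the
       // expression stack, hence the scale by wordSize (uxtx, lsl #3)
       // above and the one-slot back-off in the load.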
3183 
3184   // compute return type
3185   // x86 uses a shift and mask, or wings it with a shift plus an assert
3186   // that the mask is not needed; aarch64 just uses a bitfield extract
3187   __ ubfxw(rscratch2, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
3188   // load return address
3189   {
3190     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3191     __ mov(rscratch1, table_addr);
3192     __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
3193   }
3194 }
3195 
3196 
3197 void TemplateTable::invokevirtual_helper(Register index,
3198                                          Register recv,
3199                                          Register flags)
3200 {
3201   // Uses temporary registers r0, r3
3202   assert_different_registers(index, recv, r0, r3);
3203   // Test for an invoke of a final method
3204   Label notFinal;
3205   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
3206 
3207   const Register method = index;  // method must be rmethod
3208   assert(method == rmethod,
3209          "Method must be rmethod for interpreter calling convention");
3210 
3211   // do the call - the index is actually the method to call
3212   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3213 
3214   // It's final, need a null check here!
3215   __ null_check(recv);
3216 
3217   // profile this call
3218   __ profile_final_call(r0);
3219   __ profile_arguments_type(r0, method, r4, true);
3220 
3221   __ jump_from_interpreted(method, r0);
3222 
3223   __ bind(notFinal);
3224 
3225   // get receiver klass
3226   __ load_klass(r0, recv, true);
3227 
3228   // profile this call
3229   __ profile_virtual_call(r0, rlocals, r3);
3230 
3231   // get target Method & entry point
3232   __ lookup_virtual_method(r0, index, method);
3233   __ profile_arguments_type(r3, method, r4, true);
3234   // FIXME -- this looks completely redundant. is it?
3235   // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
3236   __ jump_from_interpreted(method, r3);
3237 }
3238 
3239 void TemplateTable::invokevirtual(int byte_no)
3240 {
3241   transition(vtos, vtos);
3242   assert(byte_no == f2_byte, "use this argument");
3243 
3244   prepare_invoke(byte_no, rmethod, noreg, r2, r3);
3245 
3246   // rmethod: index (actually a Method*)
3247   // r2: receiver
3248   // r3: flags
3249 
3250   invokevirtual_helper(rmethod, r2, r3);
3251 }
3252 
3253 void TemplateTable::invokespecial(int byte_no)
3254 {
3255   transition(vtos, vtos);
3256   assert(byte_no == f1_byte, "use this argument");
3257 
3258   prepare_invoke(byte_no, rmethod, noreg,  // get f1 Method*
3259                  r2);  // get receiver also for null check
3260   __ verify_oop(r2);
3261   __ null_check(r2);
3262   // do the call
3263   __ profile_call(r0);
3264   __ profile_arguments_type(r0, rmethod, rbcp, false);
3265   __ jump_from_interpreted(rmethod, r0);
3266 }
3267 
3268 void TemplateTable::invokestatic(int byte_no)
3269 {
3270   transition(vtos, vtos);
3271   assert(byte_no == f1_byte, "use this argument");
3272 
3273   prepare_invoke(byte_no, rmethod);  // get f1 Method*
3274   // do the call
3275   __ profile_call(r0);
3276   __ profile_arguments_type(r0, rmethod, r4, false);
3277   __ jump_from_interpreted(rmethod, r0);
3278 }
3279 
3280 void TemplateTable::fast_invokevfinal(int byte_no)
3281 {
3282   __ call_Unimplemented();
3283 }
3284 
3285 void TemplateTable::invokeinterface(int byte_no) {
3286   transition(vtos, vtos);
3287   assert(byte_no == f1_byte, "use this argument");
3288 
3289   prepare_invoke(byte_no, r0, rmethod,  // get f1 Klass*, f2 Method*
3290                  r2, r3); // recv, flags
3291 
3292   // r0: interface klass (from f1)
3293   // rmethod: method (from f2)
3294   // r2: receiver
3295   // r3: flags
3296 
3297   // First check for Object case, then private interface method,
3298   // then regular interface method.
3299 
3300   // Special case of invokeinterface called for virtual method of
3301   // java.lang.Object.  See cpCache.cpp for details.
3302   Label notObjectMethod;
3303   __ tbz(r3, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
3304 
3305   invokevirtual_helper(rmethod, r2, r3);
3306   __ bind(notObjectMethod);
3307 
3308   Label no_such_interface;
3309 
3310   // Check for private method invocation - indicated by vfinal
3311   Label notVFinal;
3312   __ tbz(r3, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
3313 
3314   // Get receiver klass into r3 - also a null check
3315   __ load_klass(r3, r2, true);
3316 
3317   Label subtype;
3318   __ check_klass_subtype(r3, r0, r4, subtype);
3319   // If we get here the typecheck failed
3320   __ b(no_such_interface);
3321   __ bind(subtype);
3322 
3323   __ profile_final_call(r0);
3324   __ profile_arguments_type(r0, rmethod, r4, true);
3325   __ jump_from_interpreted(rmethod, r0);
3326 
3327   __ bind(notVFinal);
3328 
3329   // Get receiver klass into r3 - also a null check
3330   __ restore_locals();
3331   __ load_klass(r3, r2, true);
3332 
3333   Label no_such_method;
3334 
3335   // Preserve method for throw_AbstractMethodErrorVerbose.
3336   __ mov(r16, rmethod);
3337   // Receiver subtype check against REFC.
3338   // Superklass in r0. Subklass in r3. Blows rscratch2, r13
3339   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3340                              r3, r0, noreg,
3341                              // outputs: scan temp. reg, scan temp. reg
3342                              rscratch2, r13,
3343                              no_such_interface,
3344                              /*return_method=*/false);
3345 
3346   // profile this call
3347   __ profile_virtual_call(r3, r13, r19);
3348 
3349   // Get declaring interface class from method, and itable index
3350 
3351   __ load_method_holder(r0, rmethod);
3352   __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
3353   __ subw(rmethod, rmethod, Method::itable_index_max);
3354   __ negw(rmethod, rmethod);
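
  // A sketch of the decoding above (not generated code): the Method stores
  // its itable index in biased form, so the subw/negw pair computes
  //   index = itable_index_max - encoded
  // recovering the real itable slot.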
3355 
3356   // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3357   __ mov(rlocals, r3);
3358   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3359                              rlocals, r0, rmethod,
3360                              // outputs: method, scan temp. reg
3361                              rmethod, r13,
3362                              no_such_interface);
3363 
3364   // rmethod: Method to call
3365   // r2: receiver
3366   // Check for abstract method error
3367   // Note: This should be done more efficiently via a throw_abstract_method_error
3368   //       interpreter entry point and a conditional jump to it in case of a null
3369   //       method.
3370   __ cbz(rmethod, no_such_method);
3371 
3372   __ profile_arguments_type(r3, rmethod, r13, true);
3373 
3374   // do the call
3375   // r2: receiver
3376   // rmethod: Method
3377   __ jump_from_interpreted(rmethod, r3);
3378   __ should_not_reach_here();
3379 
3380   // exception handling code follows...
3381   // note: must restore interpreter registers to canonical
3382   //       state for exception handling to work correctly!
3383 
3384   __ bind(no_such_method);
3385   // throw exception
3386   __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
3387   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3388   // Pass arguments for generating a verbose error message.
3389   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
3390   // the call_VM checks for exception, so we should never return here.
3391   __ should_not_reach_here();
3392 
3393   __ bind(no_such_interface);
3394   // throw exception
3395   __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
3396   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3397   // Pass arguments for generating a verbose error message.
3398   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3399                    InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
3400   // the call_VM checks for exception, so we should never return here.
3401   __ should_not_reach_here();
3402   return;
3403 }
3404 
3405 void TemplateTable::invokehandle(int byte_no) {
3406   transition(vtos, vtos);
3407   assert(byte_no == f1_byte, "use this argument");
3408 
3409   prepare_invoke(byte_no, rmethod, r0, r2);
3410   __ verify_method_ptr(r2);
3411   __ verify_oop(r2);
3412   __ null_check(r2);
3413 
3414   // FIXME: profile the LambdaForm also
3415 
3416   // r13 is safe to use here as a scratch reg because it is about to
3417   // be clobbered by jump_from_interpreted().
3418   __ profile_final_call(r13);
3419   __ profile_arguments_type(r13, rmethod, r4, true);
3420 
3421   __ jump_from_interpreted(rmethod, r0);
3422 }
3423 
3424 void TemplateTable::invokedynamic(int byte_no) {
3425   transition(vtos, vtos);
3426   assert(byte_no == f1_byte, "use this argument");
3427 
3428   prepare_invoke(byte_no, rmethod, r0);
3429 
3430   // r0: CallSite object (from cpool->resolved_references[])
3431   // rmethod: MH.linkToCallSite method (from f2)
3432 
3433   // Note:  the CallSite object in r0 is already pushed by prepare_invoke
3434 
3435   // %%% should make a type profile for any invokedynamic that takes a ref argument
3436   // profile this call
3437   __ profile_call(rbcp);
3438   __ profile_arguments_type(r3, rmethod, r13, false);
3439 
3440   __ verify_oop(r0);
3441 
3442   __ jump_from_interpreted(rmethod, r0);
3443 }
3444 
3445 
3446 //-----------------------------------------------------------------------------
3447 // Allocation
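//
// Illustratively, the Java expression "new Foo()" compiles to
//   new #k; dup; invokespecial Foo.<init>
// and only the "new" bytecode is handled by _new() below: it allocates and
// zeroes the instance, while the constructor runs as a separate call.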
3448 
3449 void TemplateTable::_new() {
3450   transition(vtos, atos);
3451 
3452   __ get_unsigned_2_byte_index_at_bcp(r3, 1);
3453   Label slow_case;
3454   Label done;
3455   Label initialize_header;
3456   Label initialize_object; // including clearing the fields
3457 
3458   __ get_cpool_and_tags(r4, r0);
3459   // Make sure the class we're about to instantiate has been resolved.
3460   // This is done before loading the InstanceKlass to be consistent with the
3461   // order in which the constant pool is updated (see ConstantPool::klass_at_put)
3462   const int tags_offset = Array<u1>::base_offset_in_bytes();
3463   __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
3464   __ lea(rscratch1, Address(rscratch1, tags_offset));
3465   __ ldarb(rscratch1, rscratch1);
3466   __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
3467   __ br(Assembler::NE, slow_case);
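
  // The ldarb above is a load-acquire: it is intended to pair with the store
  // that publishes the resolved class (see ConstantPool::klass_at_put), so
  // once we observe JVM_CONSTANT_Class here, the resolved klass loaded below
  // is visible as well.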
3468 
3469   // get InstanceKlass
3470   __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);
3471 
3472   // make sure klass is initialized & doesn't have finalizer
3473   // make sure klass is fully initialized
3474   __ ldrb(rscratch1, Address(r4, InstanceKlass::init_state_offset()));
3475   __ cmp(rscratch1, (u1)InstanceKlass::fully_initialized);
3476   __ br(Assembler::NE, slow_case);
3477 
3478   // get instance_size in InstanceKlass (scaled to a count of bytes)
3479   __ ldrw(r3,
3480           Address(r4,
3481                   Klass::layout_helper_offset()));
3482   // test to see if it has a finalizer or is malformed in some way
3483   __ tbnz(r3, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
3484 
3485   // Allocate the instance:
3486   //  If TLAB is enabled:
3487   //    Try to allocate in the TLAB.
3488   //    If fails, go to the slow path.
3489   //  Else If inline contiguous allocations are enabled:
3490   //    Try to allocate in eden.
3491   //    If fails due to heap end, go to slow path.
3492   //
3493   //  If TLAB is enabled OR inline contiguous is enabled:
3494   //    Initialize the allocation.
3495   //    Exit.
3496   //
3497   //  Go to slow path.
3498   const bool allow_shared_alloc =
3499     Universe::heap()->supports_inline_contig_alloc();
3500 
3501   if (UseTLAB) {
3502     __ tlab_allocate(r0, r3, 0, noreg, r1, slow_case);
3503 
3504     if (ZeroTLAB) {
3505       // the fields have been already cleared
3506       __ b(initialize_header);
3507     } else {
3508       // initialize both the header and fields
3509       __ b(initialize_object);
3510     }
3511   } else {
3512     // Allocation in the shared Eden, if allowed.
3513     //
3514     // r3: instance size in bytes
3515     if (allow_shared_alloc) {
3516       __ eden_allocate(r0, r3, 0, r10, slow_case);
3517     }
3518   }
3519 
3520   // If UseTLAB or allow_shared_alloc is true, the object has been created
3521   // above and still needs to be initialized. Otherwise, go to the slow path.
3522   if (UseTLAB || allow_shared_alloc) {
3523     // The object's fields are initialized before its header.  If the body
3524     // size (size minus header) is zero, go directly to header initialization.
3525     __ bind(initialize_object);
3526     __ sub(r3, r3, oopDesc::base_offset_in_bytes());
3527     __ cbz(r3, initialize_header);
3528 
3529     // Initialize object fields
3530     {
3531       __ add(r2, r0, oopDesc::base_offset_in_bytes());
3532       if (!is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong)) {
3533         __ strw(zr, Address(__ post(r2, BytesPerInt)));
3534         __ sub(r3, r3, BytesPerInt);
3535         __ cbz(r3, initialize_header);
3536       }
3537       Label loop;
3538       __ bind(loop);
3539       __ str(zr, Address(__ post(r2, BytesPerLong)));
3540       __ sub(r3, r3, BytesPerLong);
3541       __ cbnz(r3, loop);
3542     }
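
    // An illustrative C sketch of the clearing above (not generated code;
    // obj stands for the new object in r0, instance_size for the byte size
    // loaded into r3 earlier):
    //   memset((char*)obj + oopDesc::base_offset_in_bytes(), 0,
    //          instance_size - oopDesc::base_offset_in_bytes());
    // with one 4-byte store issued first when the base offset is not 8-byte
    // aligned, so the main loop can run in 8-byte steps.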
3543 
3544     // initialize object header only.
3545     __ bind(initialize_header);
3546     if (UseBiasedLocking || UseCompactObjectHeaders) {
3547       __ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
3548     } else {
3549       __ mov(rscratch1, (intptr_t)markWord::prototype().value());
3550     }
3551     __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
3552     if (!UseCompactObjectHeaders) {
3553       __ store_klass_gap(r0, zr);  // zero klass gap for compressed oops
3554       __ store_klass(r0, r4);      // store klass last
3555     }
3556     {
3557       SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3558       // Trigger dtrace event for fastpath
3559       __ push(atos); // save the return value
3560       __ call_VM_leaf(
3561            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), r0);
3562       __ pop(atos); // restore the return value
3563 
3564     }
3565     __ b(done);
3566   }
3567 
3568   // slow case
3569   __ bind(slow_case);
3570   __ get_constant_pool(c_rarg1);
3571   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3572   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3573   __ verify_oop(r0);
3574 
3575   // continue
3576   __ bind(done);
3577   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3578   __ membar(Assembler::StoreStore);
3579 }
3580 
3581 void TemplateTable::newarray() {
3582   transition(itos, atos);
3583   __ load_unsigned_byte(c_rarg1, at_bcp(1));
3584   __ mov(c_rarg2, r0);
3585   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3586           c_rarg1, c_rarg2);
3587   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3588   __ membar(Assembler::StoreStore);
3589 }
3590 
3591 void TemplateTable::anewarray() {
3592   transition(itos, atos);
3593   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3594   __ get_constant_pool(c_rarg1);
3595   __ mov(c_rarg3, r0);
3596   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3597           c_rarg1, c_rarg2, c_rarg3);
3598   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3599   __ membar(Assembler::StoreStore);
3600 }
3601 
3602 void TemplateTable::arraylength() {
3603   transition(atos, itos);
3604   __ null_check(r0, arrayOopDesc::length_offset_in_bytes());
3605   __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
3606 }
3607 
3608 void TemplateTable::checkcast()
3609 {
3610   transition(atos, atos);
3611   Label done, is_null, ok_is_subtype, quicked, resolved;
3612   __ cbz(r0, is_null);
3613 
3614   // Get cpool & tags index
3615   __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3616   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3617   // See if bytecode has already been quicked
3618   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3619   __ lea(r1, Address(rscratch1, r19));
3620   __ ldarb(r1, r1);
3621   __ cmp(r1, (u1)JVM_CONSTANT_Class);
3622   __ br(Assembler::EQ, quicked);
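
  // Not yet quickened: enter the VM to resolve the class.  After successful
  // resolution the tag reads JVM_CONSTANT_Class, so later executions of this
  // bytecode take the quick path above.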
3623 
3624   __ push(atos); // save receiver for result, and for GC
3625   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3626   // vm_result_2 has metadata result
3627   __ get_vm_result_2(r0, rthread);
3628   __ pop(r3); // restore receiver
3629   __ b(resolved);
3630 
3631   // Get superklass in r0; the object (which carries the subklass) stays in r3
3632   __ bind(quicked);
3633   __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
3634   __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
3635 
3636   __ bind(resolved);
3637   __ load_klass(r19, r3);
3638 
3639   // Generate subtype check.  Blows r2, r5.  Object in r3.
3640   // Superklass in r0.  Subklass in r19.
3641   __ gen_subtype_check(r19, ok_is_subtype);
3642 
3643   // Come here on failure
3644   __ push(r3);
3645   // object is at TOS
3646   __ b(Interpreter::_throw_ClassCastException_entry);
3647 
3648   // Come here on success
3649   __ bind(ok_is_subtype);
3650   __ mov(r0, r3); // Restore the object from r3 into r0 (the result)
3651 
3652   // Collect counts on whether this test sees NULLs a lot or not.
3653   if (ProfileInterpreter) {
3654     __ b(done);
3655     __ bind(is_null);
3656     __ profile_null_seen(r2);
3657   } else {
3658     __ bind(is_null);   // same as 'done'
3659   }
3660   __ bind(done);
3661 }
3662 
3663 void TemplateTable::instanceof() {
3664   transition(atos, itos);
3665   Label done, is_null, ok_is_subtype, quicked, resolved;
3666   __ cbz(r0, is_null);
3667 
3668   // Get cpool & tags index
3669   __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3670   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3671   // See if bytecode has already been quicked
3672   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3673   __ lea(r1, Address(rscratch1, r19));
3674   __ ldarb(r1, r1);
3675   __ cmp(r1, (u1)JVM_CONSTANT_Class);
3676   __ br(Assembler::EQ, quicked);
3677 
3678   __ push(atos); // save receiver for result, and for GC
3679   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3680   // vm_result_2 has metadata result
3681   __ get_vm_result_2(r0, rthread);
3682   __ pop(r3); // restore receiver
3683   __ verify_oop(r3);
3684   __ load_klass(r3, r3);
3685   __ b(resolved);
3686 
3687   // Get superklass in r0 and subklass in r3
3688   __ bind(quicked);
3689   __ load_klass(r3, r0);
3690   __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);
3691 
3692   __ bind(resolved);
3693 
3694   // Generate subtype check.  Blows r2, r5
3695   // Superklass in r0.  Subklass in r3.
3696   __ gen_subtype_check(r3, ok_is_subtype);
3697 
3698   // Come here on failure
3699   __ mov(r0, 0);
3700   __ b(done);
3701   // Come here on success
3702   __ bind(ok_is_subtype);
3703   __ mov(r0, 1);
3704 
3705   // Collect counts on whether this test sees NULLs a lot or not.
3706   if (ProfileInterpreter) {
3707     __ b(done);
3708     __ bind(is_null);
3709     __ profile_null_seen(r2);
3710   } else {
3711     __ bind(is_null);   // same as 'done'
3712   }
3713   __ bind(done);
3714   // r0 = 0: obj == NULL or  obj is not an instanceof the specified klass
3715   // r0 = 1: obj != NULL and obj is     an instanceof the specified klass
3716 }
3717 
3718 //-----------------------------------------------------------------------------
3719 // Breakpoints
3720 void TemplateTable::_breakpoint() {
3721   // Note: We get here even if we are single stepping..
3722   // jbug insists on setting breakpoints at every bytecode
3723   // even if we are in single step mode.
3724 
3725   transition(vtos, vtos);
3726 
3727   // get the unpatched byte code
3728   __ get_method(c_rarg1);
3729   __ call_VM(noreg,
3730              CAST_FROM_FN_PTR(address,
3731                               InterpreterRuntime::get_original_bytecode_at),
3732              c_rarg1, rbcp);
3733   __ mov(r19, r0);
3734 
3735   // post the breakpoint event
3736   __ call_VM(noreg,
3737              CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3738              rmethod, rbcp);
3739 
3740   // complete the execution of original bytecode
3741   __ mov(rscratch1, r19);
3742   __ dispatch_only_normal(vtos);
3743 }
3744 
3745 //-----------------------------------------------------------------------------
3746 // Exceptions
3747 
3748 void TemplateTable::athrow() {
3749   transition(atos, vtos);
3750   __ null_check(r0);
3751   __ b(Interpreter::throw_exception_entry());
3752 }
3753 
3754 //-----------------------------------------------------------------------------
3755 // Synchronization
3756 //
3757 // Note: monitorenter & exit are symmetric routines; which is reflected
3758 //       in the assembly code structure as well
3759 //
3760 // Stack layout:
3761 //
3762 // [expressions  ] <--- esp               = expression stack top
3763 // ..
3764 // [expressions  ]
3765 // [monitor entry] <--- monitor block top = expression stack bot
3766 // ..
3767 // [monitor entry]
3768 // [frame data   ] <--- monitor block bot
3769 // ...
3770 // [saved rfp    ] <--- rfp
3771 void TemplateTable::monitorenter()
3772 {
3773   transition(atos, vtos);
3774 
3775   // check for NULL object
3776   __ null_check(r0);
3777 
3778   const Address monitor_block_top(
3779         rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3780   const Address monitor_block_bot(
3781         rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
3782   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3783 
3784   Label allocated;
3785 
3786   // initialize entry pointer
3787   __ mov(c_rarg1, zr); // points to free slot or NULL
3788 
3789   // find a free slot in the monitor block (result in c_rarg1)
3790   {
3791     Label entry, loop, exit;
3792     __ ldr(c_rarg3, monitor_block_top); // points to current entry,
3793                                         // starting with top-most entry
3794     __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3795 
3796     __ b(entry);
3797 
3798     __ bind(loop);
3799     // check if current entry is used
3800     // if not used then remember entry in c_rarg1
3801     __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3802     __ cmp(zr, rscratch1);
3803     __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
3804     // check if current entry is for same object
3805     __ cmp(r0, rscratch1);
3806     // if same object then stop searching
3807     __ br(Assembler::EQ, exit);
3808     // otherwise advance to next entry
3809     __ add(c_rarg3, c_rarg3, entry_size);
3810     __ bind(entry);
3811     // check if bottom reached
3812     __ cmp(c_rarg3, c_rarg2);
3813     // if not at bottom then check this entry
3814     __ br(Assembler::NE, loop);
3815     __ bind(exit);
3816   }
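
  // An illustrative sketch of the search above (not generated code):
  //   for (e = top; e != bottom; e = e + entry_size) {
  //     if (e->obj == NULL) free = e;   // remember a free slot
  //     if (e->obj == obj)  break;      // stop at an entry for this object
  //   }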
3817 
3818   __ cbnz(c_rarg1, allocated); // check if a slot has been found and
3819                                // if found, continue with that one
3820 
3821   // allocate one if there's no free slot
3822   {
3823     Label entry, loop;
3824     // 1. compute new pointers            // esp: old expression stack top
3825     __ ldr(c_rarg1, monitor_block_bot);   // c_rarg1: old expression stack bottom
3826     __ sub(esp, esp, entry_size);         // move expression stack top
3827     __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
3828     __ mov(c_rarg3, esp);                 // set start value for copy loop
3829     __ str(c_rarg1, monitor_block_bot);   // set new monitor block bottom
3830 
3831     __ sub(sp, sp, entry_size);           // make room for the monitor
3832 
3833     __ b(entry);
3834     // 2. move expression stack contents
3835     __ bind(loop);
3836     __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3837                                                    // word from old location
3838     __ str(c_rarg2, Address(c_rarg3, 0));          // and store it at new location
3839     __ add(c_rarg3, c_rarg3, wordSize);            // advance to next word
3840     __ bind(entry);
3841     __ cmp(c_rarg3, c_rarg1);        // check if bottom reached
3842     __ br(Assembler::NE, loop);      // if not at bottom then
3843                                      // copy next word
3844   }
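
  // Illustratively (not generated code), the copy loop above slides each
  // expression stack word down by entry_size bytes, opening a gap for the
  // new monitor entry:
  //   for (p = new_top; p != new_bottom; p += wordSize)
  //     *p = *(p + entry_size);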
3845 
3846   // call run-time routine
3847   // c_rarg1: points to monitor entry
3848   __ bind(allocated);
3849 
3850   // Increment bcp to point to the next bytecode, so exception
3851   // handling for async. exceptions works correctly.
3852   // The object has already been popped from the stack, so the
3853   // expression stack looks correct.
3854   __ increment(rbcp);
3855 
3856   // store object
3857   __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3858   __ lock_object(c_rarg1);
3859 
3860   // check to make sure this monitor doesn't cause stack overflow after locking
3861   __ save_bcp();  // in case of exception
3862   __ generate_stack_overflow_check(0);
3863 
3864   // The bcp has already been incremented. Just need to dispatch to
3865   // next instruction.
3866   __ dispatch_next(vtos);
3867 }
3868 
3869 
3870 void TemplateTable::monitorexit()
3871 {
3872   transition(atos, vtos);
3873 
3874   // check for NULL object
3875   __ null_check(r0);
3876 
3877   const Address monitor_block_top(
3878         rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3879   const Address monitor_block_bot(
3880         rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
3881   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3882 
3883   Label found;
3884 
3885   // find matching slot
3886   {
3887     Label entry, loop;
3888     __ ldr(c_rarg1, monitor_block_top); // points to current entry,
3889                                         // starting with top-most entry
3890     __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3891                                         // of monitor block
3892     __ b(entry);
3893 
3894     __ bind(loop);
3895     // check if current entry is for same object
3896     __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3897     __ cmp(r0, rscratch1);
3898     // if same object then stop searching
3899     __ br(Assembler::EQ, found);
3900     // otherwise advance to next entry
3901     __ add(c_rarg1, c_rarg1, entry_size);
3902     __ bind(entry);
3903     // check if bottom reached
3904     __ cmp(c_rarg1, c_rarg2);
3905     // if not at bottom then check this entry
3906     __ br(Assembler::NE, loop);
3907   }
3908 
3909   // Error handling. Unlocking was not block-structured
3910   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3911                    InterpreterRuntime::throw_illegal_monitor_state_exception));
3912   __ should_not_reach_here();
3913 
3914   // call run-time routine
3915   __ bind(found);
3916   __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
3917   __ unlock_object(c_rarg1);
3918   __ pop_ptr(r0); // discard object
3919 }
3920 
3921 
3922 // Wide instructions
3923 void TemplateTable::wide()
3924 {
3925   __ load_unsigned_byte(r19, at_bcp(1));
3926   __ mov(rscratch1, (address)Interpreter::_wentry_point);
3927   __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
3928   __ br(rscratch1);
3929 }
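
// Illustratively, for the sequence "wide iload #n" the wide prefix lands
// here and dispatches through Interpreter::_wentry_point indexed by the
// following bytecode, whose wide handler reads a two-byte local index
// instead of the usual one-byte one.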
3930 
3931 
3932 // Multi arrays
3933 void TemplateTable::multianewarray() {
3934   transition(vtos, atos);
3935   __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
3936   // last dim is on top of stack; we want address of first one:
3937   // first_addr = last_addr + (ndims - 1) * wordSize
3938   __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
3939   __ sub(c_rarg1, c_rarg1, wordSize);
3940   call_VM(r0,
3941           CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3942           c_rarg1);
3943   __ load_unsigned_byte(r1, at_bcp(3));
3944   __ lea(esp, Address(esp, r1, Address::uxtw(3)));
3945 }
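
// Illustratively, "new int[a][b]" compiles to multianewarray with two
// dimension words on the expression stack (b on top), so c_rarg1 above ends
// up as esp + (2 - 1) * wordSize, the address of the first dimension; the
// final two instructions then pop all ndims dimension words.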