1 /*
   2  * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "gc/shared/barrierSetAssembler.hpp"
  28 #include "interpreter/interp_masm.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/cpCache.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/methodHandles.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/synchronizer.hpp"
  42 
  43 #define __ _masm->
  44 
  45 //----------------------------------------------------------------------------------------------------
  46 // Platform-dependent initialization
  47 
  48 void TemplateTable::pd_initialize() {
  49   // No arm specific initialization
  50 }
  51 
  52 //----------------------------------------------------------------------------------------------------
  53 // Address computation
  54 
  55 // local variables
  56 static inline Address iaddress(int n)            {
  57   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  58 }
  59 
  60 static inline Address laddress(int n)            { return iaddress(n + 1); }
  61 static inline Address haddress(int n)            { return iaddress(n + 0); }
  62 
  63 static inline Address faddress(int n)            { return iaddress(n); }
  64 static inline Address daddress(int n)            { return laddress(n); }
  65 static inline Address aaddress(int n)            { return iaddress(n); }
  66 
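     // Layout note implied by the accessors above: local slots are addressed
     // relative to Rlocals with the slot index scaled by the stack element size
     // (see get_local_base_addr() below, which subtracts the scaled index), and
     // a category-2 value (long/double) in slots n/n+1 keeps its low word at
     // laddress(n) == iaddress(n + 1) and its high word at haddress(n) == iaddress(n).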
  67 
  68 void TemplateTable::get_local_base_addr(Register r, Register index) {
  69   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  70 }
  71 
  72 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  73   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  74 }
  75 
  76 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  77   return load_iaddress(index, scratch);
  78 }
  79 
  80 Address TemplateTable::load_faddress(Register index, Register scratch) {
  81 #ifdef __SOFTFP__
  82   return load_iaddress(index, scratch);
  83 #else
  84   get_local_base_addr(scratch, index);
  85   return Address(scratch);
  86 #endif // __SOFTFP__
  87 }
  88 
  89 Address TemplateTable::load_daddress(Register index, Register scratch) {
  90   get_local_base_addr(scratch, index);
  91   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  92 }
  93 
  94 // At top of the Java expression stack, which may be different from SP.
  95 // It isn't for category 1 objects.
  96 static inline Address at_tos() {
  97   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
  98 }
  99 
 100 static inline Address at_tos_p1() {
 101   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 102 }
 103 
 104 static inline Address at_tos_p2() {
 105   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 106 }
 107 
 108 
 109 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 110 // separate ldr instructions (supports nonadjacent values).
 111 // Used for longs in all modes, and for doubles in SOFTFP mode.
 112 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 113   const Register Rlocal_base = tmp;
 114   assert_different_registers(Rlocal_index, tmp);
 115 
 116   get_local_base_addr(Rlocal_base, Rlocal_index);
 117   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 118   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 119 }
 120 
 121 
 122 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 123 // separate str instructions (supports nonadjacent values).
 124 // Used for longs in all modes, and for doubles in SOFTFP mode
 125 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 126   const Register Rlocal_base = tmp;
 127   assert_different_registers(Rlocal_index, tmp);
 128 
 129   get_local_base_addr(Rlocal_base, Rlocal_index);
 130   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 131   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 132 }
 133 
 134 // Returns address of Java array element using temp register as address base.
 135 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 136   int logElemSize = exact_log2(type2aelembytes(elemType));
 137   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 138   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 139 }
 140 
 141 // Returns address of Java array element using temp register as offset from array base
 142 Address TemplateTable::get_array_elem_addr_same_base(BasicType elemType, Register array, Register index, Register temp) {
 143   int logElemSize = exact_log2(type2aelembytes(elemType));
 144   if (logElemSize == 0) {
 145     __ add(temp, index, arrayOopDesc::base_offset_in_bytes(elemType));
 146   } else {
 147     __ mov(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 148     __ add_ptr_scaled_int32(temp, temp, index, logElemSize);
 149   }
 150   return Address(array, temp);
 151 }
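
     // Both helpers above address the same element; they differ only in what the
     // temp register holds.  As a sketch, the effective address is
     //   array + arrayOopDesc::base_offset_in_bytes(elemType)
     //         + (index << exact_log2(type2aelembytes(elemType)))
     // get_array_elem_addr() folds "array + scaled index" into temp and keeps the
     // base offset as the displacement, while get_array_elem_addr_same_base()
     // folds "base offset + scaled index" into temp and keeps the array oop as
     // the base register of the returned Address.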
 152 
 153 //----------------------------------------------------------------------------------------------------
 154 // Condition conversion
 155 AsmCondition convNegCond(TemplateTable::Condition cc) {
 156   switch (cc) {
 157     case TemplateTable::equal        : return ne;
 158     case TemplateTable::not_equal    : return eq;
 159     case TemplateTable::less         : return ge;
 160     case TemplateTable::less_equal   : return gt;
 161     case TemplateTable::greater      : return le;
 162     case TemplateTable::greater_equal: return lt;
 163   }
 164   ShouldNotReachHere();
 165   return nv;
 166 }
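
     // Note: convNegCond() returns the ARM condition that is true exactly when the
     // Java comparison "cc" is false; the branch templates use it to jump to the
     // not-taken successor and fall through into the taken path.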
 167 
 168 //----------------------------------------------------------------------------------------------------
 169 // Miscellaneous helper routines
 170 
 171 // Store an oop (or NULL) at the address described by obj.
 172 // Blows all volatile registers (R0-R3, Rtemp, LR).
 173 // Also destroys new_val and obj.base().
 174 static void do_oop_store(InterpreterMacroAssembler* _masm,
 175                          Address obj,
 176                          Register new_val,
 177                          Register tmp1,
 178                          Register tmp2,
 179                          Register tmp3,
 180                          bool is_null,
 181                          DecoratorSet decorators = 0) {
 182 
 183   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 184   if (is_null) {
 185     __ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
 186   } else {
 187     __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
 188   }
 189 }
 190 
 191 static void do_oop_load(InterpreterMacroAssembler* _masm,
 192                         Register dst,
 193                         Address obj,
 194                         DecoratorSet decorators = 0) {
 195   __ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
 196 }
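
     // Typical use (sketch, see aaload()/aastore() below):
     //   do_oop_load (_masm, R0_tos, element_addr, IS_ARRAY);
     //   do_oop_store(_masm, element_addr, value_reg, tmp1, tmp2, tmp3, false, IS_ARRAY);
     // The decorators are passed through to load_heap_oop()/store_heap_oop(), which
     // let the BarrierSetAssembler emit whatever GC barriers the collector needs.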
 197 
 198 Address TemplateTable::at_bcp(int offset) {
 199   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 200   return Address(Rbcp, offset);
 201 }
 202 
 203 
 204 // Blows volatile registers R0-R3, Rtemp, LR.
 205 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 206                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 207                                    int byte_no) {
 208   assert_different_registers(bc_reg, temp_reg);
 209   if (!RewriteBytecodes)  return;
 210   Label L_patch_done;
 211 
 212   switch (bc) {
 213   case Bytecodes::_fast_aputfield:
 214   case Bytecodes::_fast_bputfield:
 215   case Bytecodes::_fast_zputfield:
 216   case Bytecodes::_fast_cputfield:
 217   case Bytecodes::_fast_dputfield:
 218   case Bytecodes::_fast_fputfield:
 219   case Bytecodes::_fast_iputfield:
 220   case Bytecodes::_fast_lputfield:
 221   case Bytecodes::_fast_sputfield:
 222     {
 223       // We skip bytecode quickening for putfield instructions when
 224       // the put_code written to the constant pool cache is zero.
 225       // This is required so that every execution of this instruction
 226       // calls out to InterpreterRuntime::resolve_get_put to do
 227       // additional, required work.
 228       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 229       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 230       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 231       __ mov(bc_reg, bc);
 232       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 233     }
 234     break;
 235   default:
 236     assert(byte_no == -1, "sanity");
 237     // the pair bytecodes have already done the load.
 238     if (load_bc_into_bc_reg) {
 239       __ mov(bc_reg, bc);
 240     }
 241   }
 242 
 243   if (__ can_post_breakpoint()) {
 244     Label L_fast_patch;
 245     // if a breakpoint is present we can't rewrite the stream directly
 246     __ ldrb(temp_reg, at_bcp(0));
 247     __ cmp(temp_reg, Bytecodes::_breakpoint);
 248     __ b(L_fast_patch, ne);
 249     if (bc_reg != R3) {
 250       __ mov(R3, bc_reg);
 251     }
 252     __ mov(R1, Rmethod);
 253     __ mov(R2, Rbcp);
 254     // Let breakpoint table handling rewrite to quicker bytecode
 255     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 256     __ b(L_patch_done);
 257     __ bind(L_fast_patch);
 258   }
 259 
 260 #ifdef ASSERT
 261   Label L_okay;
 262   __ ldrb(temp_reg, at_bcp(0));
 263   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 264   __ b(L_okay, eq);
 265   __ cmp(temp_reg, bc_reg);
 266   __ b(L_okay, eq);
 267   __ stop("patching the wrong bytecode");
 268   __ bind(L_okay);
 269 #endif
 270 
 271   // patch bytecode
 272   __ strb(bc_reg, at_bcp(0));
 273   __ bind(L_patch_done);
 274 }
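
     // Bytecode rewriting (quickening) as implemented above overwrites only the
     // opcode byte at the current BCP with a faster variant once resolution has
     // succeeded, e.g. (sketch):
     //   aload_0          -->  _fast_aload_0
     //   getfield #cpidx  -->  _fast_igetfield #cpidx   (operand bytes stay in place)
     // If a JVMTI breakpoint currently occupies the opcode slot, the original
     // bytecode stored on the side is updated via
     // InterpreterRuntime::set_original_bytecode_at() instead.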
 275 
 276 //----------------------------------------------------------------------------------------------------
 277 // Individual instructions
 278 
 279 void TemplateTable::nop() {
 280   transition(vtos, vtos);
 281   // nothing to do
 282 }
 283 
 284 void TemplateTable::shouldnotreachhere() {
 285   transition(vtos, vtos);
 286   __ stop("shouldnotreachhere bytecode");
 287 }
 288 
 289 
 290 
 291 void TemplateTable::aconst_null() {
 292   transition(vtos, atos);
 293   __ mov(R0_tos, 0);
 294 }
 295 
 296 
 297 void TemplateTable::iconst(int value) {
 298   transition(vtos, itos);
 299   __ mov_slow(R0_tos, value);
 300 }
 301 
 302 
 303 void TemplateTable::lconst(int value) {
 304   transition(vtos, ltos);
 305   assert((value == 0) || (value == 1), "unexpected long constant");
 306   __ mov(R0_tos, value);
 307   __ mov(R1_tos_hi, 0);
 308 }
 309 
 310 
 311 void TemplateTable::fconst(int value) {
 312   transition(vtos, ftos);
 313   const int zero = 0;         // 0.0f
 314   const int one = 0x3f800000; // 1.0f
 315   const int two = 0x40000000; // 2.0f
 316 
 317   switch(value) {
 318   case 0:   __ mov(R0_tos, zero);   break;
 319   case 1:   __ mov(R0_tos, one);    break;
 320   case 2:   __ mov(R0_tos, two);    break;
 321   default:  ShouldNotReachHere();   break;
 322   }
 323 
 324 #ifndef __SOFTFP__
 325   __ fmsr(S0_tos, R0_tos);
 326 #endif // !__SOFTFP__
 327 }
 328 
 329 
 330 void TemplateTable::dconst(int value) {
 331   transition(vtos, dtos);
 332   const int one_lo = 0;            // low part of 1.0
 333   const int one_hi = 0x3ff00000;   // high part of 1.0
 334 
 335   if (value == 0) {
 336 #ifdef __SOFTFP__
 337     __ mov(R0_tos_lo, 0);
 338     __ mov(R1_tos_hi, 0);
 339 #else
 340     __ mov(R0_tmp, 0);
 341     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 342 #endif // __SOFTFP__
 343   } else if (value == 1) {
 344     __ mov(R0_tos_lo, one_lo);
 345     __ mov_slow(R1_tos_hi, one_hi);
 346 #ifndef __SOFTFP__
 347     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 348 #endif // !__SOFTFP__
 349   } else {
 350     ShouldNotReachHere();
 351   }
 352 }
 353 
 354 
 355 void TemplateTable::bipush() {
 356   transition(vtos, itos);
 357   __ ldrsb(R0_tos, at_bcp(1));
 358 }
 359 
 360 
 361 void TemplateTable::sipush() {
 362   transition(vtos, itos);
 363   __ ldrsb(R0_tmp, at_bcp(1));
 364   __ ldrb(R1_tmp, at_bcp(2));
 365   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 366 }
 367 
 368 
 369 void TemplateTable::ldc(bool wide) {
 370   transition(vtos, vtos);
 371   Label fastCase, Condy, Done;
 372 
 373   const Register Rindex = R1_tmp;
 374   const Register Rcpool = R2_tmp;
 375   const Register Rtags  = R3_tmp;
 376   const Register RtagType = R3_tmp;
 377 
 378   if (wide) {
 379     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 380   } else {
 381     __ ldrb(Rindex, at_bcp(1));
 382   }
 383   __ get_cpool_and_tags(Rcpool, Rtags);
 384 
 385   const int base_offset = ConstantPool::header_size() * wordSize;
 386   const int tags_offset = Array<u1>::base_offset_in_bytes();
 387 
 388   // get const type
 389   __ add(Rtemp, Rtags, tags_offset);
 390   __ ldrb(RtagType, Address(Rtemp, Rindex));
 391   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 392 
 393   // unresolved class - get the resolved class
 394   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 395 
 396   // unresolved class in error (resolution failed) - call into runtime
 397   // so that the same error from first resolution attempt is thrown.
 398   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 399 
 400   // resolved class - need to call vm to get java mirror of the class
 401   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
 402 
 403   __ b(fastCase, ne);
 404 
 405   // slow case - call runtime
 406   __ mov(R1, wide);
 407   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 408   __ push(atos);
 409   __ b(Done);
 410 
 411   // int, float, String
 412   __ bind(fastCase);
 413 
 414   __ cmp(RtagType, JVM_CONSTANT_Integer);
 415   __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 416   __ b(Condy, ne);
 417 
 418   // itos, ftos
 419   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 420   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 421 
 422   // floats and ints are placed on the stack in the same way, so
 423   // we can use push(itos) to transfer a float value without using VFP
 424   __ push(itos);
 425   __ b(Done);
 426 
 427   __ bind(Condy);
 428   condy_helper(Done);
 429 
 430   __ bind(Done);
 431 }
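
     // Dispatch summary for ldc above: class-like tags (resolved or unresolved) take
     // the runtime call and push the result as atos; Integer and Float tags are read
     // straight out of the constant pool slot and pushed via itos (the raw bit
     // pattern serves for both); everything else is treated as a dynamically-computed
     // constant and handled by condy_helper().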
 432 
 433 // Fast path for caching oop constants.
 434 void TemplateTable::fast_aldc(bool wide) {
 435   transition(vtos, atos);
 436   int index_size = wide ? sizeof(u2) : sizeof(u1);
 437   Label resolved;
 438 
 439   // We are resolved if the resolved reference cache entry contains a
 440   // non-null object (CallSite, etc.)
 441   assert_different_registers(R0_tos, R2_tmp);
 442   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 443   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 444   __ cbnz(R0_tos, resolved);
 445 
 446   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 447 
 448   // first time invocation - must resolve first
 449   __ mov(R1, (int)bytecode());
 450   __ call_VM(R0_tos, entry, R1);
 451   __ bind(resolved);
 452 
 453   { // Check for the null sentinel.
 454     // If we just called the VM, that already did the mapping for us,
 455     // but it's harmless to retry.
 456     Label notNull;
 457     Register result = R0;
 458     Register tmp = R1;
 459     Register rarg = R2;
 460 
 461     // Stash null_sentinel address to get its value later
 462     __ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
 463     __ ldr(tmp, Address(rarg));
 464     __ cmp(result, tmp);
 465     __ b(notNull, ne);
 466     __ mov(result, 0);  // NULL object reference
 467     __ bind(notNull);
 468   }
 469 
 470   if (VerifyOops) {
 471     __ verify_oop(R0_tos);
 472   }
 473 }
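
     // The null_sentinel check above exists because the resolved-references array
     // uses a sentinel object to represent a constant that resolved to NULL (an
     // unresolved slot is also NULL, so the two must be distinguishable); before
     // pushing, the sentinel is mapped back to a real NULL reference.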
 474 
 475 void TemplateTable::ldc2_w() {
 476   transition(vtos, vtos);
 477   const Register Rtags  = R2_tmp;
 478   const Register Rindex = R3_tmp;
 479   const Register Rcpool = R4_tmp;
 480   const Register Rbase  = R5_tmp;
 481 
 482   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 483 
 484   __ get_cpool_and_tags(Rcpool, Rtags);
 485   const int base_offset = ConstantPool::header_size() * wordSize;
 486   const int tags_offset = Array<u1>::base_offset_in_bytes();
 487 
 488   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 489 
 490   // get type from tags
 491   __ add(Rtemp, Rtags, tags_offset);
 492   __ ldrb(Rtemp, Address(Rtemp, Rindex));
 493 
 494   Label Condy, exit;
 495 #ifdef __ABI_HARD__
 496   Label NotDouble;
 497   __ cmp(Rtemp, JVM_CONSTANT_Double);
 498   __ b(NotDouble, ne);
 499   __ ldr_double(D0_tos, Address(Rbase, base_offset));
 500 
 501   __ push(dtos);
 502   __ b(exit);
 503   __ bind(NotDouble);
 504 #endif
 505 
 506   __ cmp(Rtemp, JVM_CONSTANT_Long);
 507   __ b(Condy, ne);
 508   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 509   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 510   __ push(ltos);
 511   __ b(exit);
 512 
 513   __ bind(Condy);
 514   condy_helper(exit);
 515 
 516   __ bind(exit);
 517 }
 518 
 519 
 520 void TemplateTable::condy_helper(Label& Done)
 521 {
 522   Register obj   = R0_tmp;
 523   Register rtmp  = R1_tmp;
 524   Register flags = R2_tmp;
 525   Register off   = R3_tmp;
 526 
 527   __ mov(rtmp, (int) bytecode());
 528   __ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rtmp);
 529   __ get_vm_result_2(flags, rtmp);
 530 
 531   // VMr = obj = base address to find primitive value to push
 532   // VMr2 = flags = (tos, off) using format of CPCE::_flags
 533   __ mov(off, flags);
 534 
 535   __ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
 536   __ logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
 537 
 538   const Address field(obj, off);
 539 
 540   __ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
 541   // Make sure we don't need to mask flags after the above shift
 542   ConstantPoolCacheEntry::verify_tos_state_shift();
 543 
 544   switch (bytecode()) {
 545     case Bytecodes::_ldc:
 546     case Bytecodes::_ldc_w:
 547       {
 548         // tos in (itos, ftos, stos, btos, ctos, ztos)
 549         Label notIntFloat, notShort, notByte, notChar, notBool;
 550         __ cmp(flags, itos);
 551         __ cond_cmp(flags, ftos, ne);
 552         __ b(notIntFloat, ne);
 553         __ ldr(R0_tos, field);
 554         __ push(itos);
 555         __ b(Done);
 556 
 557         __ bind(notIntFloat);
 558         __ cmp(flags, stos);
 559         __ b(notShort, ne);
 560         __ ldrsh(R0_tos, field);
 561         __ push(stos);
 562         __ b(Done);
 563 
 564         __ bind(notShort);
 565         __ cmp(flags, btos);
 566         __ b(notByte, ne);
 567         __ ldrsb(R0_tos, field);
 568         __ push(btos);
 569         __ b(Done);
 570 
 571         __ bind(notByte);
 572         __ cmp(flags, ctos);
 573         __ b(notChar, ne);
 574         __ ldrh(R0_tos, field);
 575         __ push(ctos);
 576         __ b(Done);
 577 
 578         __ bind(notChar);
 579         __ cmp(flags, ztos);
 580         __ b(notBool, ne);
 581         __ ldrsb(R0_tos, field);
 582         __ push(ztos);
 583         __ b(Done);
 584 
 585         __ bind(notBool);
 586         break;
 587       }
 588 
 589     case Bytecodes::_ldc2_w:
 590       {
 591         Label notLongDouble;
 592         __ cmp(flags, ltos);
 593         __ cond_cmp(flags, dtos, ne);
 594         __ b(notLongDouble, ne);
 595 
 596         __ add(rtmp, obj, wordSize);
 597         __ ldr(R0_tos_lo, Address(obj, off));
 598         __ ldr(R1_tos_hi, Address(rtmp, off));
 599         __ push(ltos);
 600         __ b(Done);
 601 
 602         __ bind(notLongDouble);
 603 
 604         break;
 605       }
 606 
 607     default:
 608       ShouldNotReachHere();
 609     }
 610 
 611     __ stop("bad ldc/condy");
 612 }
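
     // Sketch of the flags word decoded by condy_helper() (format of
     // ConstantPoolCacheEntry::_flags):
     //   off = flags & ((1 << ConstantPoolCacheEntry::field_index_bits) - 1);  // offset of the value from obj
     //   tos = flags >> ConstantPoolCacheEntry::tos_state_shift;               // TosState selecting the push width
     // The left/right shift pair extracts "off", and verify_tos_state_shift()
     // guarantees the final right shift needs no extra masking for "tos".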
 613 
 614 
 615 void TemplateTable::locals_index(Register reg, int offset) {
 616   __ ldrb(reg, at_bcp(offset));
 617 }
 618 
 619 void TemplateTable::iload() {
 620   iload_internal();
 621 }
 622 
 623 void TemplateTable::nofast_iload() {
 624   iload_internal(may_not_rewrite);
 625 }
 626 
 627 void TemplateTable::iload_internal(RewriteControl rc) {
 628   transition(vtos, itos);
 629 
 630   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 631     Label rewrite, done;
 632     const Register next_bytecode = R1_tmp;
 633     const Register target_bytecode = R2_tmp;
 634 
 635     // get next byte
 636     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 637   // if _iload, wait to rewrite to _fast_iload2.  We only want to rewrite the
 638   // last two iloads in a pair.  If the next bytecode is already _fast_iload
 639   // (so it is neither a plain iload nor a caload), the current iload is the
 640   // first half of an iload pair and is rewritten to _fast_iload2 below.
 641     __ cmp(next_bytecode, Bytecodes::_iload);
 642     __ b(done, eq);
 643 
 644     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 645     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 646     __ b(rewrite, eq);
 647 
 648     // if _caload, rewrite to fast_icaload
 649     __ cmp(next_bytecode, Bytecodes::_caload);
 650     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 651     __ b(rewrite, eq);
 652 
 653     // rewrite so iload doesn't check again.
 654     __ mov(target_bytecode, Bytecodes::_fast_iload);
 655 
 656     // rewrite
 657     // R2: fast bytecode
 658     __ bind(rewrite);
 659     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 660     __ bind(done);
 661   }
 662 
 663   // Get the local value into tos
 664   const Register Rlocal_index = R1_tmp;
 665   locals_index(Rlocal_index);
 666   Address local = load_iaddress(Rlocal_index, Rtemp);
 667   __ ldr_s32(R0_tos, local);
 668 }
 669 
 670 
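     // _fast_iload2 is the rewritten form of two consecutive explicit iloads, e.g.
     // (sketch) "iload 4; iload 5" becomes "_fast_iload2 4 <fast_iload> 5": only the
     // first opcode byte is patched, and the template below reads both operands
     // (at BCP + 1 and BCP + 3), pushes the first local and leaves the second in
     // R0_tos, skipping over the embedded second opcode.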
 671 void TemplateTable::fast_iload2() {
 672   transition(vtos, itos);
 673   const Register Rlocal_index = R1_tmp;
 674 
 675   locals_index(Rlocal_index);
 676   Address local = load_iaddress(Rlocal_index, Rtemp);
 677   __ ldr_s32(R0_tos, local);
 678   __ push(itos);
 679 
 680   locals_index(Rlocal_index, 3);
 681   local = load_iaddress(Rlocal_index, Rtemp);
 682   __ ldr_s32(R0_tos, local);
 683 }
 684 
 685 void TemplateTable::fast_iload() {
 686   transition(vtos, itos);
 687   const Register Rlocal_index = R1_tmp;
 688 
 689   locals_index(Rlocal_index);
 690   Address local = load_iaddress(Rlocal_index, Rtemp);
 691   __ ldr_s32(R0_tos, local);
 692 }
 693 
 694 
 695 void TemplateTable::lload() {
 696   transition(vtos, ltos);
 697   const Register Rlocal_index = R2_tmp;
 698 
 699   locals_index(Rlocal_index);
 700   load_category2_local(Rlocal_index, R3_tmp);
 701 }
 702 
 703 
 704 void TemplateTable::fload() {
 705   transition(vtos, ftos);
 706   const Register Rlocal_index = R2_tmp;
 707 
 708   // Get the local value into tos
 709   locals_index(Rlocal_index);
 710   Address local = load_faddress(Rlocal_index, Rtemp);
 711 #ifdef __SOFTFP__
 712   __ ldr(R0_tos, local);
 713 #else
 714   __ ldr_float(S0_tos, local);
 715 #endif // __SOFTFP__
 716 }
 717 
 718 
 719 void TemplateTable::dload() {
 720   transition(vtos, dtos);
 721   const Register Rlocal_index = R2_tmp;
 722 
 723   locals_index(Rlocal_index);
 724 
 725 #ifdef __SOFTFP__
 726   load_category2_local(Rlocal_index, R3_tmp);
 727 #else
 728   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 729 #endif // __SOFTFP__
 730 }
 731 
 732 
 733 void TemplateTable::aload() {
 734   transition(vtos, atos);
 735   const Register Rlocal_index = R1_tmp;
 736 
 737   locals_index(Rlocal_index);
 738   Address local = load_aaddress(Rlocal_index, Rtemp);
 739   __ ldr(R0_tos, local);
 740 }
 741 
 742 
 743 void TemplateTable::locals_index_wide(Register reg) {
 744   assert_different_registers(reg, Rtemp);
 745   __ ldrb(Rtemp, at_bcp(2));
 746   __ ldrb(reg, at_bcp(3));
 747   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 748 }
 749 
 750 
 751 void TemplateTable::wide_iload() {
 752   transition(vtos, itos);
 753   const Register Rlocal_index = R2_tmp;
 754 
 755   locals_index_wide(Rlocal_index);
 756   Address local = load_iaddress(Rlocal_index, Rtemp);
 757   __ ldr_s32(R0_tos, local);
 758 }
 759 
 760 
 761 void TemplateTable::wide_lload() {
 762   transition(vtos, ltos);
 763   const Register Rlocal_index = R2_tmp;
 764   const Register Rlocal_base = R3_tmp;
 765 
 766   locals_index_wide(Rlocal_index);
 767   load_category2_local(Rlocal_index, R3_tmp);
 768 }
 769 
 770 
 771 void TemplateTable::wide_fload() {
 772   transition(vtos, ftos);
 773   const Register Rlocal_index = R2_tmp;
 774 
 775   locals_index_wide(Rlocal_index);
 776   Address local = load_faddress(Rlocal_index, Rtemp);
 777 #ifdef __SOFTFP__
 778   __ ldr(R0_tos, local);
 779 #else
 780   __ ldr_float(S0_tos, local);
 781 #endif // __SOFTFP__
 782 }
 783 
 784 
 785 void TemplateTable::wide_dload() {
 786   transition(vtos, dtos);
 787   const Register Rlocal_index = R2_tmp;
 788 
 789   locals_index_wide(Rlocal_index);
 790 #ifdef __SOFTFP__
 791   load_category2_local(Rlocal_index, R3_tmp);
 792 #else
 793   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 794 #endif // __SOFTFP__
 795 }
 796 
 797 
 798 void TemplateTable::wide_aload() {
 799   transition(vtos, atos);
 800   const Register Rlocal_index = R2_tmp;
 801 
 802   locals_index_wide(Rlocal_index);
 803   Address local = load_aaddress(Rlocal_index, Rtemp);
 804   __ ldr(R0_tos, local);
 805 }
 806 
 807 void TemplateTable::index_check(Register array, Register index) {
 808   // Pop ptr into array
 809   __ pop_ptr(array);
 810   index_check_without_pop(array, index);
 811 }
 812 
 813 void TemplateTable::index_check_without_pop(Register array, Register index) {
 814   assert_different_registers(array, index, Rtemp);
 815   // check array
 816   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 817   // check index
 818   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 819   __ cmp_32(index, Rtemp);
 820   if (index != R4_ArrayIndexOutOfBounds_index) {
 821     // convention with generate_ArrayIndexOutOfBounds_handler()
 822     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 823   }
 824   __ mov(R1, array, hs);
 825   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 826 }
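
     // Note: the single unsigned comparison above covers both failure modes: with
     // the "hs" (unsigned >=) condition, index >= length triggers the branch, and a
     // negative index does too, since it is reinterpreted as a large unsigned value.
     // R4 and R1 are written under the same condition so the out-of-bounds handler
     // receives the offending index and the array oop.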
 827 
 828 
 829 void TemplateTable::iaload() {
 830   transition(itos, itos);
 831   const Register Rarray = R1_tmp;
 832   const Register Rindex = R0_tos;
 833 
 834   index_check(Rarray, Rindex);
 835   Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
 836   __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 837 }
 838 
 839 
 840 void TemplateTable::laload() {
 841   transition(itos, ltos);
 842   const Register Rarray = R1_tmp;
 843   const Register Rindex = R0_tos;
 844 
 845   index_check(Rarray, Rindex);
 846 
 847   Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
 848   __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg);
 849 }
 850 
 851 
 852 void TemplateTable::faload() {
 853   transition(itos, ftos);
 854   const Register Rarray = R1_tmp;
 855   const Register Rindex = R0_tos;
 856 
 857   index_check(Rarray, Rindex);
 858 
 859   Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
 860   __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg);
 861 }
 862 
 863 
 864 void TemplateTable::daload() {
 865   transition(itos, dtos);
 866   const Register Rarray = R1_tmp;
 867   const Register Rindex = R0_tos;
 868 
 869   index_check(Rarray, Rindex);
 870 
 871   Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
 872   __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg);
 873 }
 874 
 875 
 876 void TemplateTable::aaload() {
 877   transition(itos, atos);
 878   const Register Rarray = R1_tmp;
 879   const Register Rindex = R0_tos;
 880 
 881   index_check(Rarray, Rindex);
 882   do_oop_load(_masm, R0_tos, get_array_elem_addr_same_base(T_OBJECT, Rarray, Rindex, Rtemp), IS_ARRAY);
 883 }
 884 
 885 
 886 void TemplateTable::baload() {
 887   transition(itos, itos);
 888   const Register Rarray = R1_tmp;
 889   const Register Rindex = R0_tos;
 890 
 891   index_check(Rarray, Rindex);
 892   Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
 893   __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 894 }
 895 
 896 
 897 void TemplateTable::caload() {
 898   transition(itos, itos);
 899   const Register Rarray = R1_tmp;
 900   const Register Rindex = R0_tos;
 901 
 902   index_check(Rarray, Rindex);
 903   Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
 904   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 905 }
 906 
 907 
 908 // iload followed by caload frequent pair
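     // Illustrative source of this pair: "c = chars[i]" with a local int index
     // compiles to "iload i; caload"; iload_internal() then rewrites the iload to
     // _fast_icaload, which folds the local load and the bounds-checked char load
     // into the single template below.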
 909 void TemplateTable::fast_icaload() {
 910   transition(vtos, itos);
 911   const Register Rlocal_index = R1_tmp;
 912   const Register Rarray = R1_tmp;
 913   const Register Rindex = R4_tmp; // index_check prefers index on R4
 914   assert_different_registers(Rlocal_index, Rindex);
 915   assert_different_registers(Rarray, Rindex);
 916 
 917   // load index out of locals
 918   locals_index(Rlocal_index);
 919   Address local = load_iaddress(Rlocal_index, Rtemp);
 920   __ ldr_s32(Rindex, local);
 921 
 922   // get array element
 923   index_check(Rarray, Rindex);
 924   Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
 925   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 926 }
 927 
 928 
 929 void TemplateTable::saload() {
 930   transition(itos, itos);
 931   const Register Rarray = R1_tmp;
 932   const Register Rindex = R0_tos;
 933 
 934   index_check(Rarray, Rindex);
 935   Address addr = get_array_elem_addr_same_base(T_SHORT, Rarray, Rindex, Rtemp);
 936   __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 937 }
 938 
 939 
 940 void TemplateTable::iload(int n) {
 941   transition(vtos, itos);
 942   __ ldr_s32(R0_tos, iaddress(n));
 943 }
 944 
 945 
 946 void TemplateTable::lload(int n) {
 947   transition(vtos, ltos);
 948   __ ldr(R0_tos_lo, laddress(n));
 949   __ ldr(R1_tos_hi, haddress(n));
 950 }
 951 
 952 
 953 void TemplateTable::fload(int n) {
 954   transition(vtos, ftos);
 955 #ifdef __SOFTFP__
 956   __ ldr(R0_tos, faddress(n));
 957 #else
 958   __ ldr_float(S0_tos, faddress(n));
 959 #endif // __SOFTFP__
 960 }
 961 
 962 
 963 void TemplateTable::dload(int n) {
 964   transition(vtos, dtos);
 965 #ifdef __SOFTFP__
 966   __ ldr(R0_tos_lo, laddress(n));
 967   __ ldr(R1_tos_hi, haddress(n));
 968 #else
 969   __ ldr_double(D0_tos, daddress(n));
 970 #endif // __SOFTFP__
 971 }
 972 
 973 
 974 void TemplateTable::aload(int n) {
 975   transition(vtos, atos);
 976   __ ldr(R0_tos, aaddress(n));
 977 }
 978 
 979 void TemplateTable::aload_0() {
 980   aload_0_internal();
 981 }
 982 
 983 void TemplateTable::nofast_aload_0() {
 984   aload_0_internal(may_not_rewrite);
 985 }
 986 
 987 void TemplateTable::aload_0_internal(RewriteControl rc) {
 988   transition(vtos, atos);
 989   // According to bytecode histograms, the pairs:
 990   //
 991   // _aload_0, _fast_igetfield
 992   // _aload_0, _fast_agetfield
 993   // _aload_0, _fast_fgetfield
 994   //
 995   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
 996   // bytecode checks if the next bytecode is either _fast_igetfield,
 997   // _fast_agetfield or _fast_fgetfield and then rewrites the
 998   // current bytecode into a pair bytecode; otherwise it rewrites the current
 999   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
1000   //
1001   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
1002   //       otherwise we may miss an opportunity for a pair.
1003   //
1004   // Also rewrite frequent pairs
1005   //   aload_0, aload_1
1006   //   aload_0, iload_1
1007   // These bytecode pairs need only a small amount of code and are the most profitable to rewrite.
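       //
       // Illustrative example (sketch): a getter body "return this.i;" compiles to
       //   aload_0; getfield #n; ireturn
       // Once the getfield has been quickened to _fast_igetfield, the aload_0 in
       // front of it is rewritten to _fast_iaccess_0, which performs both steps;
       // while the next bytecode is still the unquickened _getfield, the rewrite
       // is postponed (see "wait with rewrite" below) so the pair can be fused later.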
1008   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
1009     Label rewrite, done;
1010     const Register next_bytecode = R1_tmp;
1011     const Register target_bytecode = R2_tmp;
1012 
1013     // get next byte
1014     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
1015 
1016     // if _getfield then wait with rewrite
1017     __ cmp(next_bytecode, Bytecodes::_getfield);
1018     __ b(done, eq);
1019 
1020     // if _igetfield then rewrite to _fast_iaccess_0
1021     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1022     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
1023     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
1024     __ b(rewrite, eq);
1025 
1026     // if _agetfield then rewrite to _fast_aaccess_0
1027     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1028     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
1029     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
1030     __ b(rewrite, eq);
1031 
1032     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
1033     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1034     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1035 
1036     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
1037     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
1038     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
1039 
1040     // rewrite
1041     __ bind(rewrite);
1042     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
1043 
1044     __ bind(done);
1045   }
1046 
1047   aload(0);
1048 }
1049 
1050 void TemplateTable::istore() {
1051   transition(itos, vtos);
1052   const Register Rlocal_index = R2_tmp;
1053 
1054   locals_index(Rlocal_index);
1055   Address local = load_iaddress(Rlocal_index, Rtemp);
1056   __ str_32(R0_tos, local);
1057 }
1058 
1059 
1060 void TemplateTable::lstore() {
1061   transition(ltos, vtos);
1062   const Register Rlocal_index = R2_tmp;
1063 
1064   locals_index(Rlocal_index);
1065   store_category2_local(Rlocal_index, R3_tmp);
1066 }
1067 
1068 
1069 void TemplateTable::fstore() {
1070   transition(ftos, vtos);
1071   const Register Rlocal_index = R2_tmp;
1072 
1073   locals_index(Rlocal_index);
1074   Address local = load_faddress(Rlocal_index, Rtemp);
1075 #ifdef __SOFTFP__
1076   __ str(R0_tos, local);
1077 #else
1078   __ str_float(S0_tos, local);
1079 #endif // __SOFTFP__
1080 }
1081 
1082 
1083 void TemplateTable::dstore() {
1084   transition(dtos, vtos);
1085   const Register Rlocal_index = R2_tmp;
1086 
1087   locals_index(Rlocal_index);
1088 
1089 #ifdef __SOFTFP__
1090   store_category2_local(Rlocal_index, R3_tmp);
1091 #else
1092   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1093 #endif // __SOFTFP__
1094 }
1095 
1096 
1097 void TemplateTable::astore() {
1098   transition(vtos, vtos);
1099   const Register Rlocal_index = R1_tmp;
1100 
1101   __ pop_ptr(R0_tos);
1102   locals_index(Rlocal_index);
1103   Address local = load_aaddress(Rlocal_index, Rtemp);
1104   __ str(R0_tos, local);
1105 }
1106 
1107 
1108 void TemplateTable::wide_istore() {
1109   transition(vtos, vtos);
1110   const Register Rlocal_index = R2_tmp;
1111 
1112   __ pop_i(R0_tos);
1113   locals_index_wide(Rlocal_index);
1114   Address local = load_iaddress(Rlocal_index, Rtemp);
1115   __ str_32(R0_tos, local);
1116 }
1117 
1118 
1119 void TemplateTable::wide_lstore() {
1120   transition(vtos, vtos);
1121   const Register Rlocal_index = R2_tmp;
1122   const Register Rlocal_base = R3_tmp;
1123 
1124   __ pop_l(R0_tos_lo, R1_tos_hi);
1125 
1126   locals_index_wide(Rlocal_index);
1127   store_category2_local(Rlocal_index, R3_tmp);
1128 }
1129 
1130 
1131 void TemplateTable::wide_fstore() {
1132   wide_istore();
1133 }
1134 
1135 
1136 void TemplateTable::wide_dstore() {
1137   wide_lstore();
1138 }
1139 
1140 
1141 void TemplateTable::wide_astore() {
1142   transition(vtos, vtos);
1143   const Register Rlocal_index = R2_tmp;
1144 
1145   __ pop_ptr(R0_tos);
1146   locals_index_wide(Rlocal_index);
1147   Address local = load_aaddress(Rlocal_index, Rtemp);
1148   __ str(R0_tos, local);
1149 }
1150 
1151 
1152 void TemplateTable::iastore() {
1153   transition(itos, vtos);
1154   const Register Rindex = R4_tmp; // index_check prefers index in R4
1155   const Register Rarray = R3_tmp;
1156   // R0_tos: value
1157 
1158   __ pop_i(Rindex);
1159   index_check(Rarray, Rindex);
1160   Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
1161   __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1162 }
1163 
1164 
1165 void TemplateTable::lastore() {
1166   transition(ltos, vtos);
1167   const Register Rindex = R4_tmp; // index_check prefers index in R4
1168   const Register Rarray = R3_tmp;
1169   // R0_tos_lo:R1_tos_hi: value
1170 
1171   __ pop_i(Rindex);
1172   index_check(Rarray, Rindex);
1173 
1174   Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
1175   __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg, false);
1176 }
1177 
1178 
1179 void TemplateTable::fastore() {
1180   transition(ftos, vtos);
1181   const Register Rindex = R4_tmp; // index_check prefers index in R4
1182   const Register Rarray = R3_tmp;
1183   // S0_tos/R0_tos: value
1184 
1185   __ pop_i(Rindex);
1186   index_check(Rarray, Rindex);
1187   Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
1188   __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg, false);
1189 }
1190 
1191 
1192 void TemplateTable::dastore() {
1193   transition(dtos, vtos);
1194   const Register Rindex = R4_tmp; // index_check prefers index in R4
1195   const Register Rarray = R3_tmp;
1196   // D0_tos / R0_tos_lo:R1_tos_hi: value
1197 
1198   __ pop_i(Rindex);
1199   index_check(Rarray, Rindex);
1200 
1201   Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
1202   __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg, false);
1203 }
1204 
1205 
1206 void TemplateTable::aastore() {
1207   transition(vtos, vtos);
1208   Label is_null, throw_array_store, done;
1209 
1210   const Register Raddr_1   = R1_tmp;
1211   const Register Rvalue_2  = R2_tmp;
1212   const Register Rarray_3  = R3_tmp;
1213   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1214   const Register Rsub_5    = R5_tmp;
1215   const Register Rsuper_LR = LR_tmp;
1216 
1217   // stack: ..., array, index, value
1218   __ ldr(Rvalue_2, at_tos());     // Value
1219   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1220   __ ldr(Rarray_3, at_tos_p2());  // Array
1221 
1222   index_check_without_pop(Rarray_3, Rindex_4);
1223 
1224   // Compute the array base
1225   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1226 
1227   // do array store check - check for NULL value first
1228   __ cbz(Rvalue_2, is_null);
1229 
1230   // Load subklass
1231   __ load_klass(Rsub_5, Rvalue_2);
1232   // Load superklass
1233   __ load_klass(Rtemp, Rarray_3);
1234   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1235 
1236   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1237   // Come here on success
1238 
1239   // Store value
1240   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1241 
1242   // Now store using the appropriate barrier
1243   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IS_ARRAY);
1244   __ b(done);
1245 
1246   __ bind(throw_array_store);
1247 
1248   // Come here on failure of subtype check
1249   __ profile_typecheck_failed(R0_tmp);
1250 
1251   // object is at TOS
1252   __ b(Interpreter::_throw_ArrayStoreException_entry);
1253 
1254   // Have a NULL in Rvalue_2, store NULL at array[index].
1255   __ bind(is_null);
1256   __ profile_null_seen(R0_tmp);
1257 
1258   // Store a NULL
1259   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IS_ARRAY);
1260 
1261   // Pop stack arguments
1262   __ bind(done);
1263   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1264 }
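
     // Note: aastore keeps array, index and value on the expression stack until the
     // single Rstack_top adjustment at the end; the ArrayStoreException path relies
     // on the failing value still being at TOS (see "object is at TOS" above).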
1265 
1266 
1267 void TemplateTable::bastore() {
1268   transition(itos, vtos);
1269   const Register Rindex = R4_tmp; // index_check prefers index in R4
1270   const Register Rarray = R3_tmp;
1271   // R0_tos: value
1272 
1273   __ pop_i(Rindex);
1274   index_check(Rarray, Rindex);
1275 
1276   // Need to check whether array is boolean or byte
1277   // since both types share the bastore bytecode.
1278   __ load_klass(Rtemp, Rarray);
1279   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1280   Label L_skip;
1281   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1282   __ b(L_skip, eq);
1283   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1284   __ bind(L_skip);
1285   Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
1286   __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1287 }
1288 
1289 
1290 void TemplateTable::castore() {
1291   transition(itos, vtos);
1292   const Register Rindex = R4_tmp; // index_check prefers index in R4
1293   const Register Rarray = R3_tmp;
1294   // R0_tos: value
1295 
1296   __ pop_i(Rindex);
1297   index_check(Rarray, Rindex);
1298   Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
1299   __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1300 }
1301 
1302 
1303 void TemplateTable::sastore() {
1304   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1305            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1306          "base offsets for char and short should be equal");
1307   castore();
1308 }
1309 
1310 
1311 void TemplateTable::istore(int n) {
1312   transition(itos, vtos);
1313   __ str_32(R0_tos, iaddress(n));
1314 }
1315 
1316 
1317 void TemplateTable::lstore(int n) {
1318   transition(ltos, vtos);
1319   __ str(R0_tos_lo, laddress(n));
1320   __ str(R1_tos_hi, haddress(n));
1321 }
1322 
1323 
1324 void TemplateTable::fstore(int n) {
1325   transition(ftos, vtos);
1326 #ifdef __SOFTFP__
1327   __ str(R0_tos, faddress(n));
1328 #else
1329   __ str_float(S0_tos, faddress(n));
1330 #endif // __SOFTFP__
1331 }
1332 
1333 
1334 void TemplateTable::dstore(int n) {
1335   transition(dtos, vtos);
1336 #ifdef __SOFTFP__
1337   __ str(R0_tos_lo, laddress(n));
1338   __ str(R1_tos_hi, haddress(n));
1339 #else
1340   __ str_double(D0_tos, daddress(n));
1341 #endif // __SOFTFP__
1342 }
1343 
1344 
1345 void TemplateTable::astore(int n) {
1346   transition(vtos, vtos);
1347   __ pop_ptr(R0_tos);
1348   __ str(R0_tos, aaddress(n));
1349 }
1350 
1351 
1352 void TemplateTable::pop() {
1353   transition(vtos, vtos);
1354   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1355 }
1356 
1357 
1358 void TemplateTable::pop2() {
1359   transition(vtos, vtos);
1360   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1361 }
1362 
1363 
1364 void TemplateTable::dup() {
1365   transition(vtos, vtos);
1366   // stack: ..., a
1367   __ load_ptr(0, R0_tmp);
1368   __ push_ptr(R0_tmp);
1369   // stack: ..., a, a
1370 }
1371 
1372 
1373 void TemplateTable::dup_x1() {
1374   transition(vtos, vtos);
1375   // stack: ..., a, b
1376   __ load_ptr(0, R0_tmp);  // load b
1377   __ load_ptr(1, R2_tmp);  // load a
1378   __ store_ptr(1, R0_tmp); // store b
1379   __ store_ptr(0, R2_tmp); // store a
1380   __ push_ptr(R0_tmp);     // push b
1381   // stack: ..., b, a, b
1382 }
1383 
1384 
1385 void TemplateTable::dup_x2() {
1386   transition(vtos, vtos);
1387   // stack: ..., a, b, c
1388   __ load_ptr(0, R0_tmp);   // load c
1389   __ load_ptr(1, R2_tmp);   // load b
1390   __ load_ptr(2, R4_tmp);   // load a
1391 
1392   __ push_ptr(R0_tmp);      // push c
1393 
1394   // stack: ..., a, b, c, c
1395   __ store_ptr(1, R2_tmp);  // store b
1396   __ store_ptr(2, R4_tmp);  // store a
1397   __ store_ptr(3, R0_tmp);  // store c
1398   // stack: ..., c, a, b, c
1399 }
1400 
1401 
1402 void TemplateTable::dup2() {
1403   transition(vtos, vtos);
1404   // stack: ..., a, b
1405   __ load_ptr(1, R0_tmp);  // load a
1406   __ push_ptr(R0_tmp);     // push a
1407   __ load_ptr(1, R0_tmp);  // load b
1408   __ push_ptr(R0_tmp);     // push b
1409   // stack: ..., a, b, a, b
1410 }
1411 
1412 
1413 void TemplateTable::dup2_x1() {
1414   transition(vtos, vtos);
1415 
1416   // stack: ..., a, b, c
1417   __ load_ptr(0, R4_tmp);  // load c
1418   __ load_ptr(1, R2_tmp);  // load b
1419   __ load_ptr(2, R0_tmp);  // load a
1420 
1421   __ push_ptr(R2_tmp);     // push b
1422   __ push_ptr(R4_tmp);     // push c
1423 
1424   // stack: ..., a, b, c, b, c
1425 
1426   __ store_ptr(2, R0_tmp);  // store a
1427   __ store_ptr(3, R4_tmp);  // store c
1428   __ store_ptr(4, R2_tmp);  // store b
1429 
1430   // stack: ..., b, c, a, b, c
1431 }
1432 
1433 
1434 void TemplateTable::dup2_x2() {
1435   transition(vtos, vtos);
1436   // stack: ..., a, b, c, d
1437   __ load_ptr(0, R0_tmp);  // load d
1438   __ load_ptr(1, R2_tmp);  // load c
1439   __ push_ptr(R2_tmp);     // push c
1440   __ push_ptr(R0_tmp);     // push d
1441   // stack: ..., a, b, c, d, c, d
1442   __ load_ptr(4, R4_tmp);  // load b
1443   __ store_ptr(4, R0_tmp); // store d in b
1444   __ store_ptr(2, R4_tmp); // store b in d
1445   // stack: ..., a, d, c, b, c, d
1446   __ load_ptr(5, R4_tmp);  // load a
1447   __ store_ptr(5, R2_tmp); // store c in a
1448   __ store_ptr(3, R4_tmp); // store a in c
1449   // stack: ..., c, d, a, b, c, d
1450 }
1451 
1452 
1453 void TemplateTable::swap() {
1454   transition(vtos, vtos);
1455   // stack: ..., a, b
1456   __ load_ptr(1, R0_tmp);  // load a
1457   __ load_ptr(0, R2_tmp);  // load b
1458   __ store_ptr(0, R0_tmp); // store a in b
1459   __ store_ptr(1, R2_tmp); // store b in a
1460   // stack: ..., b, a
1461 }
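
     // All of the pop/dup*/swap templates above move raw expression-stack words with
     // load_ptr()/store_ptr() and never interpret them, which is why the same code
     // handles ints, floats and references alike (and, for the *2 forms, the two
     // halves of a long or double).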
1462 
1463 
1464 void TemplateTable::iop2(Operation op) {
1465   transition(itos, itos);
1466   const Register arg1 = R1_tmp;
1467   const Register arg2 = R0_tos;
1468 
1469   __ pop_i(arg1);
1470   switch (op) {
1471     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1472     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1473     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1474     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1475     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1476     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1477     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1478     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1479     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1480     default   : ShouldNotReachHere();
1481   }
1482 }
1483 
1484 
1485 void TemplateTable::lop2(Operation op) {
1486   transition(ltos, ltos);
1487   const Register arg1_lo = R2_tmp;
1488   const Register arg1_hi = R3_tmp;
1489   const Register arg2_lo = R0_tos_lo;
1490   const Register arg2_hi = R1_tos_hi;
1491 
1492   __ pop_l(arg1_lo, arg1_hi);
1493   switch (op) {
1494     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1495     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1496     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1497     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1498     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1499     default : ShouldNotReachHere();
1500   }
1501 }
1502 
1503 
1504 void TemplateTable::idiv() {
1505   transition(itos, itos);
1506   __ mov(R2, R0_tos);
1507   __ pop_i(R0);
1508   // R0 - dividend
1509   // R2 - divisor
1510   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1511   // R1 - result
1512   __ mov(R0_tos, R1);
1513 }
1514 
1515 
1516 void TemplateTable::irem() {
1517   transition(itos, itos);
1518   __ mov(R2, R0_tos);
1519   __ pop_i(R0);
1520   // R0 - dividend
1521   // R2 - divisor
1522   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1523   // R0 - remainder
1524 }
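
     // Calling convention of StubRoutines::Arm::idiv_irem_entry() as used by
     // idiv()/irem() above: dividend in R0, divisor in R2; the stub returns the
     // quotient in R1 and the remainder in R0, which is why idiv() copies R1 into
     // R0_tos while irem() can leave its result where it is.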
1525 
1526 
1527 void TemplateTable::lmul() {
1528   transition(ltos, ltos);
1529   const Register arg1_lo = R0_tos_lo;
1530   const Register arg1_hi = R1_tos_hi;
1531   const Register arg2_lo = R2_tmp;
1532   const Register arg2_hi = R3_tmp;
1533 
1534   __ pop_l(arg2_lo, arg2_hi);
1535 
1536   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1537 }
1538 
1539 
1540 void TemplateTable::ldiv() {
1541   transition(ltos, ltos);
1542   const Register x_lo = R2_tmp;
1543   const Register x_hi = R3_tmp;
1544   const Register y_lo = R0_tos_lo;
1545   const Register y_hi = R1_tos_hi;
1546 
1547   __ pop_l(x_lo, x_hi);
1548 
1549   // check if y = 0
1550   __ orrs(Rtemp, y_lo, y_hi);
1551   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1552   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1553 }
1554 
1555 
1556 void TemplateTable::lrem() {
1557   transition(ltos, ltos);
1558   const Register x_lo = R2_tmp;
1559   const Register x_hi = R3_tmp;
1560   const Register y_lo = R0_tos_lo;
1561   const Register y_hi = R1_tos_hi;
1562 
1563   __ pop_l(x_lo, x_hi);
1564 
1565   // check if y = 0
1566   __ orrs(Rtemp, y_lo, y_hi);
1567   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1568   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1569 }
1570 
1571 
1572 void TemplateTable::lshl() {
1573   transition(itos, ltos);
1574   const Register shift_cnt = R4_tmp;
1575   const Register val_lo = R2_tmp;
1576   const Register val_hi = R3_tmp;
1577 
1578   __ pop_l(val_lo, val_hi);
1579   __ andr(shift_cnt, R0_tos, 63);
1580   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1581 }
1582 
1583 
1584 void TemplateTable::lshr() {
1585   transition(itos, ltos);
1586   const Register shift_cnt = R4_tmp;
1587   const Register val_lo = R2_tmp;
1588   const Register val_hi = R3_tmp;
1589 
1590   __ pop_l(val_lo, val_hi);
1591   __ andr(shift_cnt, R0_tos, 63);
1592   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1593 }
1594 
1595 
1596 void TemplateTable::lushr() {
1597   transition(itos, ltos);
1598   const Register shift_cnt = R4_tmp;
1599   const Register val_lo = R2_tmp;
1600   const Register val_hi = R3_tmp;
1601 
1602   __ pop_l(val_lo, val_hi);
1603   __ andr(shift_cnt, R0_tos, 63);
1604   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1605 }
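
     // For all three long shifts the count is first masked with 63, matching the
     // JVM rule that only the low six bits of the int shift amount are used;
     // long_shift() then forms the 64-bit result in R0_tos_lo:R1_tos_hi from the
     // value popped into val_lo/val_hi.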
1606 
1607 
1608 void TemplateTable::fop2(Operation op) {
1609   transition(ftos, ftos);
1610 #ifdef __SOFTFP__
1611   __ mov(R1, R0_tos);
1612   __ pop_i(R0);
1613   switch (op) {
1614     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1615     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1616     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1617     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1618     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1619     default : ShouldNotReachHere();
1620   }
1621 #else
1622   const FloatRegister arg1 = S1_tmp;
1623   const FloatRegister arg2 = S0_tos;
1624 
1625   switch (op) {
1626     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1627     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1628     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1629     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1630     case rem:
1631 #ifndef __ABI_HARD__
1632       __ pop_f(arg1);
1633       __ fmrs(R0, arg1);
1634       __ fmrs(R1, arg2);
1635       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1636       __ fmsr(S0_tos, R0);
1637 #else
1638       __ mov_float(S1_reg, arg2);
1639       __ pop_f(S0);
1640       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1641 #endif // !__ABI_HARD__
1642       break;
1643     default : ShouldNotReachHere();
1644   }
1645 #endif // __SOFTFP__
1646 }
1647 
1648 
1649 void TemplateTable::dop2(Operation op) {
1650   transition(dtos, dtos);
1651 #ifdef __SOFTFP__
1652   __ mov(R2, R0_tos_lo);
1653   __ mov(R3, R1_tos_hi);
1654   __ pop_l(R0, R1);
1655   switch (op) {
1656     // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1657     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1658     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1659     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1660     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1661     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1662     default : ShouldNotReachHere();
1663   }
1664 #else
1665   const FloatRegister arg1 = D1_tmp;
1666   const FloatRegister arg2 = D0_tos;
1667 
1668   switch (op) {
1669     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1670     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1671     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1672     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1673     case rem:
1674 #ifndef __ABI_HARD__
1675       __ pop_d(arg1);
1676       __ fmrrd(R0, R1, arg1);
1677       __ fmrrd(R2, R3, arg2);
1678       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1679       __ fmdrr(D0_tos, R0, R1);
1680 #else
1681       __ mov_double(D1, arg2);
1682       __ pop_d(D0);
1683       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1684 #endif // !__ABI_HARD__
1685       break;
1686     default : ShouldNotReachHere();
1687   }
1688 #endif // __SOFTFP__
1689 }
1690 
1691 
1692 void TemplateTable::ineg() {
1693   transition(itos, itos);
1694   __ neg_32(R0_tos, R0_tos);
1695 }
1696 
1697 
1698 void TemplateTable::lneg() {
1699   transition(ltos, ltos);
1700   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1701   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1702 }
1703 
1704 
1705 void TemplateTable::fneg() {
1706   transition(ftos, ftos);
1707 #ifdef __SOFTFP__
1708   // Invert sign bit
1709   const int sign_mask = 0x80000000;
1710   __ eor(R0_tos, R0_tos, sign_mask);
1711 #else
1712   __ neg_float(S0_tos, S0_tos);
1713 #endif // __SOFTFP__
1714 }
1715 
1716 
1717 void TemplateTable::dneg() {
1718   transition(dtos, dtos);
1719 #ifdef __SOFTFP__
1720   // Invert sign bit in the high part of the double
1721   const int sign_mask_hi = 0x80000000;
1722   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1723 #else
1724   __ neg_double(D0_tos, D0_tos);
1725 #endif // __SOFTFP__
1726 }
1727 
1728 
1729 void TemplateTable::iinc() {
1730   transition(vtos, vtos);
1731   const Register Rconst = R2_tmp;
1732   const Register Rlocal_index = R1_tmp;
1733   const Register Rval = R0_tmp;
1734 
1735   __ ldrsb(Rconst, at_bcp(2));
1736   locals_index(Rlocal_index);
1737   Address local = load_iaddress(Rlocal_index, Rtemp);
1738   __ ldr_s32(Rval, local);
1739   __ add(Rval, Rval, Rconst);
1740   __ str_32(Rval, local);
1741 }
1742 
1743 
1744 void TemplateTable::wide_iinc() {
1745   transition(vtos, vtos);
1746   const Register Rconst = R2_tmp;
1747   const Register Rlocal_index = R1_tmp;
1748   const Register Rval = R0_tmp;
1749 
1750   // get constant in Rconst
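       // (wide iinc layout per the JVM spec: wide, iinc, indexbyte1, indexbyte2, constbyte1, constbyte2;
       // the signed 16-bit increment is assembled below from the sign-extended byte at bcp+4
       // and the unsigned byte at bcp+5)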
1751   __ ldrsb(R2_tmp, at_bcp(4));
1752   __ ldrb(R3_tmp, at_bcp(5));
1753   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
1754 
1755   locals_index_wide(Rlocal_index);
1756   Address local = load_iaddress(Rlocal_index, Rtemp);
1757   __ ldr_s32(Rval, local);
1758   __ add(Rval, Rval, Rconst);
1759   __ str_32(Rval, local);
1760 }
1761 
1762 
1763 void TemplateTable::convert() {
1764   // Checking
1765 #ifdef ASSERT
1766   { TosState tos_in  = ilgl;
1767     TosState tos_out = ilgl;
1768     switch (bytecode()) {
1769       case Bytecodes::_i2l: // fall through
1770       case Bytecodes::_i2f: // fall through
1771       case Bytecodes::_i2d: // fall through
1772       case Bytecodes::_i2b: // fall through
1773       case Bytecodes::_i2c: // fall through
1774       case Bytecodes::_i2s: tos_in = itos; break;
1775       case Bytecodes::_l2i: // fall through
1776       case Bytecodes::_l2f: // fall through
1777       case Bytecodes::_l2d: tos_in = ltos; break;
1778       case Bytecodes::_f2i: // fall through
1779       case Bytecodes::_f2l: // fall through
1780       case Bytecodes::_f2d: tos_in = ftos; break;
1781       case Bytecodes::_d2i: // fall through
1782       case Bytecodes::_d2l: // fall through
1783       case Bytecodes::_d2f: tos_in = dtos; break;
1784       default             : ShouldNotReachHere();
1785     }
1786     switch (bytecode()) {
1787       case Bytecodes::_l2i: // fall through
1788       case Bytecodes::_f2i: // fall through
1789       case Bytecodes::_d2i: // fall through
1790       case Bytecodes::_i2b: // fall through
1791       case Bytecodes::_i2c: // fall through
1792       case Bytecodes::_i2s: tos_out = itos; break;
1793       case Bytecodes::_i2l: // fall through
1794       case Bytecodes::_f2l: // fall through
1795       case Bytecodes::_d2l: tos_out = ltos; break;
1796       case Bytecodes::_i2f: // fall through
1797       case Bytecodes::_l2f: // fall through
1798       case Bytecodes::_d2f: tos_out = ftos; break;
1799       case Bytecodes::_i2d: // fall through
1800       case Bytecodes::_l2d: // fall through
1801       case Bytecodes::_f2d: tos_out = dtos; break;
1802       default             : ShouldNotReachHere();
1803     }
1804     transition(tos_in, tos_out);
1805   }
1806 #endif // ASSERT
1807 
1808   // Conversion
1809   switch (bytecode()) {
1810     case Bytecodes::_i2l:
1811       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1812       break;
1813 
1814     case Bytecodes::_i2f:
1815 #ifdef __SOFTFP__
1816       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
1817 #else
1818       __ fmsr(S0_tmp, R0_tos);
1819       __ fsitos(S0_tos, S0_tmp);
1820 #endif // __SOFTFP__
1821       break;
1822 
1823     case Bytecodes::_i2d:
1824 #ifdef __SOFTFP__
1825       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
1826 #else
1827       __ fmsr(S0_tmp, R0_tos);
1828       __ fsitod(D0_tos, S0_tmp);
1829 #endif // __SOFTFP__
1830       break;
1831 
1832     case Bytecodes::_i2b:
1833       __ sign_extend(R0_tos, R0_tos, 8);
1834       break;
1835 
1836     case Bytecodes::_i2c:
1837       __ zero_extend(R0_tos, R0_tos, 16);
1838       break;
1839 
1840     case Bytecodes::_i2s:
1841       __ sign_extend(R0_tos, R0_tos, 16);
1842       break;
1843 
1844     case Bytecodes::_l2i:
1845       /* nothing to do */
1846       break;
1847 
1848     case Bytecodes::_l2f:
1849       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
1850 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1851       __ fmsr(S0_tos, R0);
1852 #endif // !__SOFTFP__ && !__ABI_HARD__
1853       break;
1854 
1855     case Bytecodes::_l2d:
1856       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
1857 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1858       __ fmdrr(D0_tos, R0, R1);
1859 #endif // !__SOFTFP__ && !__ABI_HARD__
1860       break;
1861 
1862     case Bytecodes::_f2i:
1863 #ifndef __SOFTFP__
1864       __ ftosizs(S0_tos, S0_tos);
1865       __ fmrs(R0_tos, S0_tos);
1866 #else
1867       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
1868 #endif // !__SOFTFP__
1869       break;
1870 
1871     case Bytecodes::_f2l:
1872 #ifndef __SOFTFP__
1873       __ fmrs(R0_tos, S0_tos);
1874 #endif // !__SOFTFP__
1875       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
1876       break;
1877 
1878     case Bytecodes::_f2d:
1879 #ifdef __SOFTFP__
1880       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
1881 #else
1882       __ convert_f2d(D0_tos, S0_tos);
1883 #endif // __SOFTFP__
1884       break;
1885 
1886     case Bytecodes::_d2i:
1887 #ifndef __SOFTFP__
1888       __ ftosizd(Stemp, D0);
1889       __ fmrs(R0, Stemp);
1890 #else
1891       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
1892 #endif // !__SOFTFP__
1893       break;
1894 
1895     case Bytecodes::_d2l:
1896 #ifndef __SOFTFP__
1897       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
1898 #endif // !__SOFTFP__
1899       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
1900       break;
1901 
1902     case Bytecodes::_d2f:
1903 #ifdef __SOFTFP__
1904       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
1905 #else
1906       __ convert_d2f(S0_tos, D0_tos);
1907 #endif // __SOFTFP__
1908       break;
1909 
1910     default:
1911       ShouldNotReachHere();
1912   }
1913 }
1914 
1915 
1916 void TemplateTable::lcmp() {
1917   transition(ltos, itos);
1918   const Register arg1_lo = R2_tmp;
1919   const Register arg1_hi = R3_tmp;
1920   const Register arg2_lo = R0_tos_lo;
1921   const Register arg2_hi = R1_tos_hi;
1922   const Register res = R4_tmp;
1923 
1924   __ pop_l(arg1_lo, arg1_hi);
1925 
1926   // long compare arg1 with arg2
1927   // result is -1/0/+1 if '<'/'='/'>'
1928   Label done;
1929 
1930   __ mov (res, 0);
1931   __ cmp (arg1_hi, arg2_hi);
1932   __ mvn (res, 0, lt);
1933   __ mov (res, 1, gt);
1934   __ b(done, ne);
1935   __ cmp (arg1_lo, arg2_lo);
1936   __ mvn (res, 0, lo);
1937   __ mov (res, 1, hi);
1938   __ bind(done);
1939   __ mov (R0_tos, res);
1940 }
1941 
1942 
1943 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1944   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
1945 
1946 
1947 #ifdef __SOFTFP__
1948 
1949   if (is_float) {
1950     transition(ftos, itos);
1951     const Register Rx = R0;
1952     const Register Ry = R1;
1953 
1954     __ mov(Ry, R0_tos);
1955     __ pop_i(Rx);
1956 
1957     if (unordered_result == 1) {
1958       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
1959     } else {
1960       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
1961     }
1962 
1963   } else {
1964 
1965     transition(dtos, itos);
1966     const Register Rx_lo = R0;
1967     const Register Rx_hi = R1;
1968     const Register Ry_lo = R2;
1969     const Register Ry_hi = R3;
1970 
1971     __ mov(Ry_lo, R0_tos_lo);
1972     __ mov(Ry_hi, R1_tos_hi);
1973     __ pop_l(Rx_lo, Rx_hi);
1974 
1975     if (unordered_result == 1) {
1976       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
1977     } else {
1978       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
1979     }
1980   }
1981 
1982 #else
1983 
1984   if (is_float) {
1985     transition(ftos, itos);
1986     __ pop_f(S1_tmp);
1987     __ fcmps(S1_tmp, S0_tos);
1988   } else {
1989     transition(dtos, itos);
1990     __ pop_d(D1_tmp);
1991     __ fcmpd(D1_tmp, D0_tos);
1992   }
1993 
1994   __ fmstat();
1995 
1996   // comparison result | flag N | flag Z | flag C | flag V
1997   // "<"               |   1    |   0    |   0    |   0
1998   // "=="              |   0    |   1    |   1    |   0
1999   // ">"               |   0    |   0    |   1    |   0
2000   // unordered         |   0    |   0    |   1    |   1
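       // For example, with fcmpl semantics (unordered_result == -1) a NaN operand must compare
       // as "less than": the 'lt' condition (N != V) is true for both the "<" and "unordered"
       // rows above, which is why the mvn below is conditional on 'lt' rather than 'mi'.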
2001 
2002   if (unordered_result < 0) {
2003     __ mov(R0_tos, 1);           // result ==  1 if greater
2004     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2005   } else {
2006     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2007     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2008   }
2009   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2010 #endif // __SOFTFP__
2011 }
2012 
2013 
2014 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2015 
2016   const Register Rdisp = R0_tmp;
2017   const Register Rbumped_taken_count = R5_tmp;
2018 
2019   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2020 
2021   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2022                              InvocationCounter::counter_offset();
2023   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2024                               InvocationCounter::counter_offset();
2025   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2026 
2027   // Load up R0 with the branch displacement
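       // (branch layout per the JVM spec: the narrow form carries a signed 16-bit offset in the
       // two bytes after the opcode, while the wide form (goto_w/jsr_w) carries a signed 32-bit
       // offset in four bytes; loading the most significant byte with ldrsb sign-extends the
       // assembled displacement)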
2028   if (is_wide) {
2029     __ ldrsb(R0_tmp, at_bcp(1));
2030     __ ldrb(R1_tmp, at_bcp(2));
2031     __ ldrb(R2_tmp, at_bcp(3));
2032     __ ldrb(R3_tmp, at_bcp(4));
2033     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2034     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2035     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2036   } else {
2037     __ ldrsb(R0_tmp, at_bcp(1));
2038     __ ldrb(R1_tmp, at_bcp(2));
2039     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2040   }
2041 
2042   // Handle all the JSR stuff here, then exit.
2043   // It's much shorter and cleaner than intermingling with the
2044   // non-JSR normal-branch stuff occurring below.
2045   if (is_jsr) {
2046     // compute return address as bci in R1
2047     const Register Rret_addr = R1_tmp;
2048     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2049 
2050     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2051     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2052     __ sub(Rret_addr, Rret_addr, Rtemp);
2053 
2054     // Load the next target bytecode into R3_bytecode and advance Rbcp
2055     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2056 
2057     // Push return address
2058     __ push_i(Rret_addr);
2059     // jsr returns vtos
2060     __ dispatch_only_noverify(vtos);
2061     return;
2062   }
2063 
2064   // Normal (non-jsr) branch handling
2065 
2066   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2067   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2068 
2069   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2070   Label backedge_counter_overflow;
2071   Label profile_method;
2072   Label dispatch;
2073 
2074   if (UseLoopCounter) {
2075     // increment backedge counter for backward branches
2076     // Rdisp (R0): target offset
2077 
2078     const Register Rcnt = R2_tmp;
2079     const Register Rcounters = R1_tmp;
2080 
2081     // count only if backward branch
2082     __ tst(Rdisp, Rdisp);
2083     __ b(dispatch, pl);
2084 
2085     if (TieredCompilation) {
2086       Label no_mdo;
2087       int increment = InvocationCounter::count_increment;
2088       if (ProfileInterpreter) {
2089         // Are we profiling?
2090         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2091         __ cbz(Rtemp, no_mdo);
2092         // Increment the MDO backedge counter
2093         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2094                                                   in_bytes(InvocationCounter::counter_offset()));
2095         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2096         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2097                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2098         __ b(dispatch);
2099       }
2100       __ bind(no_mdo);
2101       // Increment backedge counter in MethodCounters*
2102       // Note Rbumped_taken_count is a callee-saved register for ARM32
2103       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2104                              Rdisp, R3_bytecode,
2105                              noreg);
2106       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2107       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2108                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2109     } else {
2110       // Increment backedge counter in MethodCounters*
2111       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2112                              Rdisp, R3_bytecode,
2113                              noreg);
2114       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2115       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2116       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2117 
2118       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2119       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // and the status bits
2120       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2121 
2122       if (ProfileInterpreter) {
2123         // Test to see if we should create a method data oop
2124         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2125         __ ldr_s32(Rtemp, profile_limit);
2126         __ cmp_32(Rcnt, Rtemp);
2127         __ b(dispatch, lt);
2128 
2129         // if no method data exists, go to profile method
2130         __ test_method_data_pointer(R4_tmp, profile_method);
2131 
2132         if (UseOnStackReplacement) {
2133           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2134           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2135           __ ldr_s32(Rtemp, backward_branch_limit);
2136           __ cmp(Rbumped_taken_count, Rtemp);
2137           __ b(dispatch, lo);
2138 
2139           // When ProfileInterpreter is on, the backedge_count comes from the
2140           // MethodData*, whose value does not get reset on the call to
2141           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2142           // routine while the method is being compiled, add a second test to make
2143           // sure the overflow function is called only once every overflow_frequency.
2144           const int overflow_frequency = 1024;
2145 
2146           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2147           assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
2148           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2149 
2150           __ b(backedge_counter_overflow, eq);
2151         }
2152       } else {
2153         if (UseOnStackReplacement) {
2154           // check for overflow against Rcnt, which is the sum of the counters
2155           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2156           __ ldr_s32(Rtemp, backward_branch_limit);
2157           __ cmp_32(Rcnt, Rtemp);
2158           __ b(backedge_counter_overflow, hs);
2159 
2160         }
2161       }
2162     }
2163     __ bind(dispatch);
2164   }
2165 
2166   if (!UseOnStackReplacement) {
2167     __ bind(backedge_counter_overflow);
2168   }
2169 
2170   // continue with the bytecode @ target
2171   __ dispatch_only(vtos);
2172 
2173   if (UseLoopCounter) {
2174     if (ProfileInterpreter) {
2175       // Out-of-line code to allocate method data oop.
2176       __ bind(profile_method);
2177 
2178       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2179       __ set_method_data_pointer_for_bcp();
2180       // reload next bytecode
2181       __ ldrb(R3_bytecode, Address(Rbcp));
2182       __ b(dispatch);
2183     }
2184 
2185     if (UseOnStackReplacement) {
2186       // invocation counter overflow
2187       __ bind(backedge_counter_overflow);
2188 
2189       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2190       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2191 
2192       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2193       const Register Rnmethod = R0;
2194 
2195       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2196 
2197       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2198 
2199       // nmethod may have been invalidated (VM may block upon call_VM return)
2200       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2201       __ cmp(R1_tmp, nmethod::in_use);
2202       __ b(dispatch, ne);
2203 
2204       // We have the address of an on-stack replacement routine in Rnmethod.
2205       // We need to prepare to execute the OSR method. First we must
2206       // migrate the locals and monitors off of the stack.
2207 
2208       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2209 
2210       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2211 
2212       // R0 is OSR buffer
2213 
2214       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2215       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2216 
2217       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2218       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2219 
2220       __ jump(R1_tmp);
2221     }
2222   }
2223 }
2224 
2225 
2226 void TemplateTable::if_0cmp(Condition cc) {
2227   transition(itos, vtos);
2228   // assume branch is more often taken than not (loops use backward branches)
2229   Label not_taken;
2230   __ cmp_32(R0_tos, 0);
2231   __ b(not_taken, convNegCond(cc));
2232   branch(false, false);
2233   __ bind(not_taken);
2234   __ profile_not_taken_branch(R0_tmp);
2235 }
2236 
2237 
2238 void TemplateTable::if_icmp(Condition cc) {
2239   transition(itos, vtos);
2240   // assume branch is more often taken than not (loops use backward branches)
2241   Label not_taken;
2242   __ pop_i(R1_tmp);
2243   __ cmp_32(R1_tmp, R0_tos);
2244   __ b(not_taken, convNegCond(cc));
2245   branch(false, false);
2246   __ bind(not_taken);
2247   __ profile_not_taken_branch(R0_tmp);
2248 }
2249 
2250 
2251 void TemplateTable::if_nullcmp(Condition cc) {
2252   transition(atos, vtos);
2253   assert(cc == equal || cc == not_equal, "invalid condition");
2254 
2255   // assume branch is more often taken than not (loops use backward branches)
2256   Label not_taken;
2257   if (cc == equal) {
2258     __ cbnz(R0_tos, not_taken);
2259   } else {
2260     __ cbz(R0_tos, not_taken);
2261   }
2262   branch(false, false);
2263   __ bind(not_taken);
2264   __ profile_not_taken_branch(R0_tmp);
2265 }
2266 
2267 
2268 void TemplateTable::if_acmp(Condition cc) {
2269   transition(atos, vtos);
2270   // assume branch is more often taken than not (loops use backward branches)
2271   Label not_taken;
2272   __ pop_ptr(R1_tmp);
2273   __ cmpoop(R1_tmp, R0_tos);
2274   __ b(not_taken, convNegCond(cc));
2275   branch(false, false);
2276   __ bind(not_taken);
2277   __ profile_not_taken_branch(R0_tmp);
2278 }
2279 
2280 
2281 void TemplateTable::ret() {
2282   transition(vtos, vtos);
2283   const Register Rlocal_index = R1_tmp;
2284   const Register Rret_bci = Rtmp_save0; // R4/R19
2285 
2286   locals_index(Rlocal_index);
2287   Address local = load_iaddress(Rlocal_index, Rtemp);
2288   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2289   __ profile_ret(Rtmp_save1, Rret_bci);
2290   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2291   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2292   __ add(Rbcp, Rtemp, Rret_bci);
2293   __ dispatch_next(vtos);
2294 }
2295 
2296 
2297 void TemplateTable::wide_ret() {
2298   transition(vtos, vtos);
2299   const Register Rlocal_index = R1_tmp;
2300   const Register Rret_bci = Rtmp_save0; // R4/R19
2301 
2302   locals_index_wide(Rlocal_index);
2303   Address local = load_iaddress(Rlocal_index, Rtemp);
2304   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2305   __ profile_ret(Rtmp_save1, Rret_bci);
2306   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2307   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2308   __ add(Rbcp, Rtemp, Rret_bci);
2309   __ dispatch_next(vtos);
2310 }
2311 
2312 
2313 void TemplateTable::tableswitch() {
2314   transition(itos, vtos);
2315 
2316   const Register Rindex  = R0_tos;
2317   const Register Rtemp2  = R1_tmp;
2318   const Register Rabcp   = R2_tmp;  // aligned bcp
2319   const Register Rlow    = R3_tmp;
2320   const Register Rhigh   = R4_tmp;
2321   const Register Roffset = R5_tmp;
2322 
2323   // align bcp
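       // (tableswitch layout per the JVM spec: opcode, 0-3 padding bytes up to a 4-byte boundary,
       // then default (4 bytes), low (4 bytes), high (4 bytes), then (high-low+1) jump offsets;
       // the aligned Rabcp computed below points at 'low', and after the ldmia writeback the
       // default offset is at Rabcp - 3*BytesPerInt)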
2324   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2325   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2326 
2327   // load lo & hi
2328   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2329   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2330   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2331 
2332   // compare index with high bound
2333   __ cmp_32(Rhigh, Rindex);
2334 
2335 
2336   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2337   __ subs(Rindex, Rindex, Rlow, ge);
2338 
2339   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2340   // ("ge" status accumulated from cmp and subs instructions) then load
2341   // offset from table, otherwise load offset for default case
2342 
2343   if(ProfileInterpreter) {
2344     Label default_case, continue_execution;
2345 
2346     __ b(default_case, lt);
2347     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2348     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2349     __ b(continue_execution);
2350 
2351     __ bind(default_case);
2352     __ profile_switch_default(R0_tmp);
2353     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2354 
2355     __ bind(continue_execution);
2356   } else {
2357     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2358     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2359   }
2360 
2361   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2362 
2363   // load the next bytecode to R3_bytecode and advance Rbcp
2364   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2365   __ dispatch_only(vtos);
2366 
2367 }
2368 
2369 
2370 void TemplateTable::lookupswitch() {
2371   transition(itos, itos);
2372   __ stop("lookupswitch bytecode should have been rewritten");
2373 }
2374 
2375 
2376 void TemplateTable::fast_linearswitch() {
2377   transition(itos, vtos);
2378   Label loop, found, default_case, continue_execution;
2379 
2380   const Register Rkey     = R0_tos;
2381   const Register Rabcp    = R2_tmp;  // aligned bcp
2382   const Register Rdefault = R3_tmp;
2383   const Register Rcount   = R4_tmp;
2384   const Register Roffset  = R5_tmp;
2385 
2386   // bswap Rkey, so we can avoid bswapping the table entries
2387   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2388 
2389   // align bcp
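       // (lookupswitch layout per the JVM spec: opcode, 0-3 padding bytes up to a 4-byte boundary,
       // then default (4 bytes), npairs (4 bytes), followed by npairs of (match, offset) pairs;
       // the aligned Rabcp computed below points at 'default', and the ldmia leaves it at the
       // first pair)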
2390   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2391   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2392 
2393   // load default & counter
2394   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2395   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2396 
2397   __ cmp_32(Rcount, 0);
2398   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2399   __ b(default_case, eq);
2400 
2401   // table search
2402   __ bind(loop);
2403   __ cmp_32(Rtemp, Rkey);
2404   __ b(found, eq);
2405   __ subs(Rcount, Rcount, 1);
2406   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2407   __ b(loop, ne);
2408 
2409   // default case
2410   __ bind(default_case);
2411   __ profile_switch_default(R0_tmp);
2412   __ mov(Roffset, Rdefault);
2413   __ b(continue_execution);
2414 
2415   // entry found -> get offset
2416   __ bind(found);
2417   // Rabcp is already incremented and points to the next entry
2418   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2419   if (ProfileInterpreter) {
2420     // Calculate index of the selected case.
2421     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2422 
2423     // align bcp
2424     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2425     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2426 
2427     // load number of cases
2428     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2429     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2430 
2431     // Selected index = <number of cases> - <current loop count>
2432     __ sub(R1_tmp, R2_tmp, Rcount);
2433     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2434   }
2435 
2436   // continue execution
2437   __ bind(continue_execution);
2438   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2439 
2440   // load the next bytecode to R3_bytecode and advance Rbcp
2441   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2442   __ dispatch_only(vtos);
2443 }
2444 
2445 
2446 void TemplateTable::fast_binaryswitch() {
2447   transition(itos, vtos);
2448   // Implementation using the following core algorithm:
2449   //
2450   // int binary_search(int key, LookupswitchPair* array, int n) {
2451   //   // Binary search according to "Methodik des Programmierens" by
2452   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2453   //   int i = 0;
2454   //   int j = n;
2455   //   while (i+1 < j) {
2456   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2457   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2458   //     // where a stands for the array and assuming that the (nonexistent)
2459   //     // element a[n] is infinitely big.
2460   //     int h = (i + j) >> 1;
2461   //     // i < h < j
2462   //     if (key < array[h].fast_match()) {
2463   //       j = h;
2464   //     } else {
2465   //       i = h;
2466   //     }
2467   //   }
2468   //   // R: a[i] <= key < a[i+1] or Q
2469   //   // (i.e., if key is within array, i is the correct index)
2470   //   return i;
2471   // }
2472 
2473   // register allocation
2474   const Register key    = R0_tos;                // already set (tosca)
2475   const Register array  = R1_tmp;
2476   const Register i      = R2_tmp;
2477   const Register j      = R3_tmp;
2478   const Register h      = R4_tmp;
2479   const Register val    = R5_tmp;
2480   const Register temp1  = Rtemp;
2481   const Register temp2  = LR_tmp;
2482   const Register offset = R3_tmp;
2483 
2484   // set 'array' = aligned bcp + 2 ints
2485   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2486   __ align_reg(array, temp1, BytesPerInt);
2487 
2488   // initialize i & j
2489   __ mov(i, 0);                                  // i = 0;
2490   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2491   // Convert j into native byte-ordering
2492   __ byteswap_u32(j, temp1, temp2);
2493 
2494   // and start
2495   Label entry;
2496   __ b(entry);
2497 
2498   // binary search loop
2499   { Label loop;
2500     __ bind(loop);
2501     // int h = (i + j) >> 1;
2502     __ add(h, i, j);                             // h = i + j;
2503     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2504     // if (key < array[h].fast_match()) {
2505     //   j = h;
2506     // } else {
2507     //   i = h;
2508     // }
2509     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2510     // Convert array[h].match to native byte-ordering before compare
2511     __ byteswap_u32(val, temp1, temp2);
2512     __ cmp_32(key, val);
2513     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2514     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2515     // while (i+1 < j)
2516     __ bind(entry);
2517     __ add(temp1, i, 1);                             // i+1
2518     __ cmp(temp1, j);                                // i+1 < j
2519     __ b(loop, lt);
2520   }
2521 
2522   // end of binary search, result index is i (must check again!)
2523   Label default_case;
2524   // Convert array[i].match to native byte-ordering before compare
2525   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2526   __ byteswap_u32(val, temp1, temp2);
2527   __ cmp_32(key, val);
2528   __ b(default_case, ne);
2529 
2530   // entry found
2531   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2532   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2533   __ profile_switch_case(R0, i, R1, i);
2534   __ byteswap_u32(offset, temp1, temp2);
2535   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2536   __ dispatch_only(vtos);
2537 
2538   // default case
2539   __ bind(default_case);
2540   __ profile_switch_default(R0);
2541   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2542   __ byteswap_u32(offset, temp1, temp2);
2543   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2544   __ dispatch_only(vtos);
2545 }
2546 
2547 
2548 void TemplateTable::_return(TosState state) {
2549   transition(state, state);
2550   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2551 
2552   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2553     Label skip_register_finalizer;
2554     assert(state == vtos, "only valid state");
2555     __ ldr(R1, aaddress(0));
2556     __ load_klass(Rtemp, R1);
2557     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2558     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2559 
2560     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2561 
2562     __ bind(skip_register_finalizer);
2563   }
2564 
2565   // Narrow result if state is itos but result type is smaller.
2566   // Need to narrow in the return bytecode rather than in generate_return_entry
2567   // since compiled code callers expect the result to already be narrowed.
2568   if (state == itos) {
2569     __ narrow(R0_tos);
2570   }
2571   __ remove_activation(state, LR);
2572 
2573   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2574 
2575   // According to interpreter calling conventions, result is returned in R0/R1,
2576   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2577   // This conversion should be done after remove_activation, as it uses
2578   // push(state) & pop(state) to preserve return value.
2579   __ convert_tos_to_retval(state);
2580 
2581   __ ret();
2582 
2583   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2584   __ nop();
2585 }
2586 
2587 
2588 // ----------------------------------------------------------------------------
2589 // Volatile variables demand their effects be made known to all CPUs in
2590 // order.  Store buffers on most chips allow reads & writes to reorder; the
2591 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2592 // memory barrier (i.e., it's not sufficient that the interpreter does not
2593 // reorder volatile references; the hardware also must not reorder them).
2594 //
2595 // According to the new Java Memory Model (JMM):
2596 // (1) All volatiles are serialized wrt to each other.
2597 // ALSO reads & writes act as acquire & release, so:
2598 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2599 // the read float up to before the read.  It's OK for non-volatile memory refs
2600 // that happen before the volatile read to float down below it.
2601 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2602 // that happen BEFORE the write float down to after the write.  It's OK for
2603 // non-volatile memory refs that happen after the volatile write to float up
2604 // before it.
2605 //
2606 // We only put in barriers around volatile refs (they are expensive), not
2607 // _between_ memory refs (that would require us to track the flavor of the
2608 // previous memory refs).  Requirements (2) and (3) require some barriers
2609 // before volatile stores and after volatile loads.  These nearly cover
2610 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2611 // case is placed after volatile-stores although it could just as well go
2612 // before volatile-loads.
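     //
     // A minimal sketch of the resulting pattern in the field accessors below:
     //   volatile load:   load the field, then volatile_barrier(LoadLoad | LoadStore)
     //   volatile store:  volatile_barrier(StoreStore | LoadStore), store the field,
     //                    followed by a StoreLoad barrier (the store-load case mentioned above)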
2613 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2614                                      Register tmp,
2615                                      bool preserve_flags,
2616                                      Register load_tgt) {
2617   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2618 }
2619 
2620 // Blows all volatile registers: R0-R3, Rtemp, LR.
2621 void TemplateTable::resolve_cache_and_index(int byte_no,
2622                                             Register Rcache,
2623                                             Register Rindex,
2624                                             size_t index_size) {
2625   assert_different_registers(Rcache, Rindex, Rtemp);
2626 
2627   Label resolved;
2628   Bytecodes::Code code = bytecode();
2629   switch (code) {
2630   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2631   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2632   default: break;
2633   }
2634 
2635   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2636   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
2637   __ cmp(Rtemp, code);  // have we resolved this bytecode?
2638   __ b(resolved, eq);
2639 
2640   // resolve first time through
2641   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2642   __ mov(R1, code);
2643   __ call_VM(noreg, entry, R1);
2644   // Update registers with resolved info
2645   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
2646   __ bind(resolved);
2647 }
2648 
2649 
2650 // The Rcache and Rindex registers must be set before the call
2651 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
2652                                               Register Rindex,
2653                                               Register Roffset,
2654                                               Register Rflags,
2655                                               Register Robj,
2656                                               bool is_static = false) {
2657 
2658   assert_different_registers(Rcache, Rindex, Rtemp);
2659   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
2660 
2661   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2662 
2663   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2664 
2665   // Field offset
2666   __ ldr(Roffset, Address(Rtemp,
2667            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2668 
2669   // Flags
2670   __ ldr_u32(Rflags, Address(Rtemp,
2671            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2672 
2673   if (is_static) {
2674     __ ldr(Robj, Address(Rtemp,
2675              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2676     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2677     __ ldr(Robj, Address(Robj, mirror_offset));
2678     __ resolve_oop_handle(Robj);
2679   }
2680 }
2681 
2682 
2683 // Blows all volatile registers: R0-R3, Rtemp, LR.
2684 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2685                                                Register method,
2686                                                Register itable_index,
2687                                                Register flags,
2688                                                bool is_invokevirtual,
2689                                                bool is_invokevfinal/*unused*/,
2690                                                bool is_invokedynamic) {
2691   // setup registers
2692   const Register cache = R2_tmp;
2693   const Register index = R3_tmp;
2694   const Register temp_reg = Rtemp;
2695   assert_different_registers(cache, index, temp_reg);
2696   assert_different_registers(method, itable_index, temp_reg);
2697 
2698   // determine constant pool cache field offsets
2699   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2700   const int method_offset = in_bytes(
2701     ConstantPoolCache::base_offset() +
2702       ((byte_no == f2_byte)
2703        ? ConstantPoolCacheEntry::f2_offset()
2704        : ConstantPoolCacheEntry::f1_offset()
2705       )
2706     );
2707   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2708                                     ConstantPoolCacheEntry::flags_offset());
2709   // access constant pool cache fields
2710   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2711                                     ConstantPoolCacheEntry::f2_offset());
2712 
2713   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2714   resolve_cache_and_index(byte_no, cache, index, index_size);
2715     __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
2716     __ ldr(method, Address(temp_reg, method_offset));
2717 
2718   if (itable_index != noreg) {
2719     __ ldr(itable_index, Address(temp_reg, index_offset));
2720   }
2721   __ ldr_u32(flags, Address(temp_reg, flags_offset));
2722 }
2723 
2724 
2725 // The cache and index registers are expected to be set before the call and should not be Rtemp.
2726 // Blows volatile registers R0-R3, Rtemp, LR,
2727 // except cache and index registers which are preserved.
2728 void TemplateTable::jvmti_post_field_access(Register Rcache,
2729                                             Register Rindex,
2730                                             bool is_static,
2731                                             bool has_tos) {
2732   assert_different_registers(Rcache, Rindex, Rtemp);
2733 
2734   if (__ can_post_field_access()) {
2735     // Check to see if a field access watch has been set before we take
2736     // the time to call into the VM.
2737 
2738     Label Lcontinue;
2739 
2740     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
2741     __ cbz(Rtemp, Lcontinue);
2742 
2743     // cache entry pointer
2744     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2745     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
2746     if (is_static) {
2747       __ mov(R1, 0);        // NULL object reference
2748     } else {
2749       __ pop(atos);         // Get the object
2750       __ mov(R1, R0_tos);
2751       __ verify_oop(R1);
2752       __ push(atos);        // Restore stack state
2753     }
2754     // R1: object pointer or NULL
2755     // R2: cache entry pointer
2756     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2757                R1, R2);
2758     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
2759 
2760     __ bind(Lcontinue);
2761   }
2762 }
2763 
2764 
2765 void TemplateTable::pop_and_check_object(Register r) {
2766   __ pop_ptr(r);
2767   __ null_check(r, Rtemp);  // for field access must check obj.
2768   __ verify_oop(r);
2769 }
2770 
2771 
2772 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2773   transition(vtos, vtos);
2774 
2775   const Register Roffset  = R2_tmp;
2776   const Register Robj     = R3_tmp;
2777   const Register Rcache   = R4_tmp;
2778   const Register Rflagsav = Rtmp_save0;  // R4/R19
2779   const Register Rindex   = R5_tmp;
2780   const Register Rflags   = R5_tmp;
2781 
2782   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
2783   jvmti_post_field_access(Rcache, Rindex, is_static, false);
2784   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
2785 
2786   __ mov(Rflagsav, Rflags);
2787 
2788   if (!is_static) pop_and_check_object(Robj);
2789 
2790   Label Done, Lint, Ltable, shouldNotReachHere;
2791   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
2792 
2793   // compute type
2794   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
2795   // Make sure we don't need to mask flags after the above shift
2796   ConstantPoolCacheEntry::verify_tos_state_shift();
2797 
2798   // There are actually two implementations of getfield/getstatic:
2799   //
2800   // 1) Table switch using add(PC,...) instruction (fast_version)
2801   // 2) Table switch using ldr(PC,...) instruction
2802   //
2803   // The first version requires a fixed-size code block for each case and
2804   // cannot be used in RewriteBytecodes and VerifyOops
2805   // modes.
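       //
       // Sketch of the two dispatch forms used below (the tos state is in Rflags after the shift):
       //   fast:  add(PC, PC, tos_state << (log_max_block_size + Assembler::LogInstructionSize))
       //   slow:  ldr(PC, Address(PC, tos_state << LogBytesPerWord)), indexing the emit_address table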
2806 
2807   // Size of fixed size code block for fast_version
2808   const int log_max_block_size = 3;
2809   const int max_block_size = 1 << log_max_block_size;
2810 
2811   // Decide if fast version is enabled
2812   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops;
2813 
2814   // On 32-bit ARM the atos and itos cases can be merged only for the fast version, because
2815   // atos requires additional processing in the slow version.
2816   bool atos_merged_with_itos = fast_version;
2817 
2818   assert(number_of_states == 10, "number of tos states should be equal to 10");
2819 
2820   __ cmp(Rflags, itos);
2821   if(atos_merged_with_itos) {
2822     __ cmp(Rflags, atos, ne);
2823   }
2824 
2825   // table switch by type
2826   if(fast_version) {
2827     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
2828   } else {
2829     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
2830   }
2831 
2832   // jump to itos/atos case
2833   __ b(Lint);
2834 
2835   // table with addresses for slow version
2836   if (fast_version) {
2837     // nothing to do
2838   } else  {
2839     __ bind(Ltable);
2840     __ emit_address(Lbtos);
2841     __ emit_address(Lztos);
2842     __ emit_address(Lctos);
2843     __ emit_address(Lstos);
2844     __ emit_address(Litos);
2845     __ emit_address(Lltos);
2846     __ emit_address(Lftos);
2847     __ emit_address(Ldtos);
2848     __ emit_address(Latos);
2849   }
2850 
2851 #ifdef ASSERT
2852   int seq = 0;
2853 #endif
2854   // btos
2855   {
2856     assert(btos == seq++, "btos has unexpected value");
2857     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
2858     __ bind(Lbtos);
2859     __ access_load_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2860     __ push(btos);
2861     // Rewrite bytecode to be faster
2862     if (!is_static && rc == may_rewrite) {
2863       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
2864     }
2865     __ b(Done);
2866   }
2867 
2868   // ztos (same as btos for getfield)
2869   {
2870     assert(ztos == seq++, "ztos has unexpected value");
2871     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
2872     __ bind(Lztos);
2873     __ access_load_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2874     __ push(ztos);
2875     // Rewrite bytecode to be faster (use btos fast getfield)
2876     if (!is_static && rc == may_rewrite) {
2877       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
2878     }
2879     __ b(Done);
2880   }
2881 
2882   // ctos
2883   {
2884     assert(ctos == seq++, "ctos has unexpected value");
2885     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
2886     __ bind(Lctos);
2887     __ access_load_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2888     __ push(ctos);
2889     if (!is_static && rc == may_rewrite) {
2890       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
2891     }
2892     __ b(Done);
2893   }
2894 
2895   // stos
2896   {
2897     assert(stos == seq++, "stos has unexpected value");
2898     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
2899     __ bind(Lstos);
2900     __ access_load_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2901     __ push(stos);
2902     if (!is_static && rc == may_rewrite) {
2903       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
2904     }
2905     __ b(Done);
2906   }
2907 
2908   // itos
2909   {
2910     assert(itos == seq++, "itos has unexpected value");
2911     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
2912     __ bind(Litos);
2913     __ b(shouldNotReachHere);
2914   }
2915 
2916   // ltos
2917   {
2918     assert(ltos == seq++, "ltos has unexpected value");
2919     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
2920     __ bind(Lltos);
2921     __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg);
2922     __ push(ltos);
2923     if (!is_static && rc == may_rewrite) {
2924       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
2925     }
2926     __ b(Done);
2927   }
2928 
2929   // ftos
2930   {
2931     assert(ftos == seq++, "ftos has unexpected value");
2932     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
2933     __ bind(Lftos);
2934     // floats and ints are placed on stack in the same way, so
2935     // we can use push(itos) to transfer value without using VFP
2936     __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2937     __ push(itos);
2938     if (!is_static && rc == may_rewrite) {
2939       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
2940     }
2941     __ b(Done);
2942   }
2943 
2944   // dtos
2945   {
2946     assert(dtos == seq++, "dtos has unexpected value");
2947     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
2948     __ bind(Ldtos);
2949     // doubles and longs are placed on stack in the same way, so
2950     // we can use push(ltos) to transfer value without using VFP
2951     __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg);
2952     __ push(ltos);
2953     if (!is_static && rc == may_rewrite) {
2954       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
2955     }
2956     __ b(Done);
2957   }
2958 
2959   // atos
2960   {
2961     assert(atos == seq++, "atos has unexpected value");
2962 
2963     // atos case for slow version on 32-bit ARM
2964     if(!atos_merged_with_itos) {
2965       __ bind(Latos);
2966       do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
2967       __ push(atos);
2968       // Rewrite bytecode to be faster
2969       if (!is_static && rc == may_rewrite) {
2970         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
2971       }
2972       __ b(Done);
2973     }
2974   }
2975 
2976   assert(vtos == seq++, "vtos has unexpected value");
2977 
2978   __ bind(shouldNotReachHere);
2979   __ should_not_reach_here();
2980 
2981   // itos and atos cases are frequent so it makes sense to move them out of the table switch
2982   // atos case can be merged with itos case (and thus moved out of the table switch) on 32-bit ARM, fast version only
2983 
2984   __ bind(Lint);
2985   __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2986   __ push(itos);
2987   // Rewrite bytecode to be faster
2988   if (!is_static && rc == may_rewrite) {
2989     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
2990   }
2991 
2992   __ bind(Done);
2993 
2994   // Check for volatile field
2995   Label notVolatile;
2996   __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2997 
2998   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
2999 
3000   __ bind(notVolatile);
3001 }
3002 
3003 void TemplateTable::getfield(int byte_no) {
3004   getfield_or_static(byte_no, false);
3005 }
3006 
3007 void TemplateTable::nofast_getfield(int byte_no) {
3008   getfield_or_static(byte_no, false, may_not_rewrite);
3009 }
3010 
3011 void TemplateTable::getstatic(int byte_no) {
3012   getfield_or_static(byte_no, true);
3013 }
3014 
3015 
3016 // The cache and index registers are expected to be set before the call and should not be R1 or Rtemp.
3017 // Blows volatile registers R0-R3, Rtemp, LR,
3018 // except cache and index registers which are preserved.
3019 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3020   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3021   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3022 
3023   if (__ can_post_field_modification()) {
3024     // Check to see if a field modification watch has been set before we take
3025     // the time to call into the VM.
3026     Label Lcontinue;
3027 
3028     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3029     __ cbz(Rtemp, Lcontinue);
3030 
3031     if (is_static) {
3032       // Life is simple.  Null out the object pointer.
3033       __ mov(R1, 0);
3034     } else {
3035       // Life is harder. The stack holds the value on top, followed by the object.
3036       // We don't know the size of the value, though; it could be one or two words
3037       // depending on its type. As a result, we must find the type to determine where
3038       // the object is.
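           //
           // Expression stack layout at this point (value on top, object below it):
           //   one-word value (e.g. itos):  [value][object]         -> object at expr_offset(1)
           //   two-word value (ltos/dtos):  [value][value][object]  -> object at expr_offset(2)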
3039 
3040       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3041       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3042 
3043       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3044       // Make sure we don't need to mask Rtemp after the above shift
3045       ConstantPoolCacheEntry::verify_tos_state_shift();
3046 
3047       __ cmp(Rtemp, ltos);
3048       __ cond_cmp(Rtemp, dtos, ne);
3049       // two word value (ltos/dtos)
3050       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3051 
3052       // one word value (not ltos, dtos)
3053       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3054     }
3055 
3056     // cache entry pointer
3057     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3058     __ add(R2, R2, in_bytes(cp_base_offset));
3059 
3060     // object (tos)
3061     __ mov(R3, Rstack_top);
3062 
3063     // R1: object pointer set up above (NULL if static)
3064     // R2: cache entry pointer
3065     // R3: value object on the stack
3066     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3067                R1, R2, R3);
3068     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3069 
3070     __ bind(Lcontinue);
3071   }
3072 }
3073 
3074 
3075 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3076   transition(vtos, vtos);
3077 
3078   const Register Roffset  = R2_tmp;
3079   const Register Robj     = R3_tmp;
3080   const Register Rcache   = R4_tmp;
3081   const Register Rflagsav = Rtmp_save0;  // R4/R19
3082   const Register Rindex   = R5_tmp;
3083   const Register Rflags   = R5_tmp;
3084 
3085   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3086   jvmti_post_field_mod(Rcache, Rindex, is_static);
3087   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3088 
3089   // Check for volatile field
3090   Label notVolatile;
3091   __ mov(Rflagsav, Rflags);
3092   __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3093 
3094   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3095 
3096   __ bind(notVolatile);
3097 
3098   Label Done, Lint, shouldNotReachHere;
3099   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3100 
3101   // compute type
3102   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3103   // Make sure we don't need to mask flags after the above shift
3104   ConstantPoolCacheEntry::verify_tos_state_shift();
3105 
3106   // There are actually two implementations of putfield/putstatic:
3107   //
3108   // 32-bit ARM:
3109   // 1) Table switch using add(PC,...) instruction (fast_version)
3110   // 2) Table switch using ldr(PC,...) instruction
3111   //
3112   // The first version requires a fixed-size code block for each case and
3113   // cannot be used in RewriteBytecodes and VerifyOops
3114   // modes.
3115 
3116   // Size of fixed size code block for fast_version (in instructions)
3117   const int log_max_block_size = 3;
3118   const int max_block_size = 1 << log_max_block_size;
3119 
3120   // Decide if fast version is enabled
3121   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops;
3122 
3123   assert(number_of_states == 10, "number of tos states should be equal to 10");
3124 
3125   // itos case is frequent and is moved outside the table switch
3126   __ cmp(Rflags, itos);
3127 
3128   // table switch by type
3129   if (fast_version) {
3130     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3131   } else  {
3132     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3133   }
3134 
3135   // jump to itos case
3136   __ b(Lint);
3137 
3138   // table with addresses for slow version
3139   if (fast_version) {
3140     // nothing to do
3141   } else  {
3142     __ bind(Ltable);
3143     __ emit_address(Lbtos);
3144     __ emit_address(Lztos);
3145     __ emit_address(Lctos);
3146     __ emit_address(Lstos);
3147     __ emit_address(Litos);
3148     __ emit_address(Lltos);
3149     __ emit_address(Lftos);
3150     __ emit_address(Ldtos);
3151     __ emit_address(Latos);
3152   }
3153 
3154 #ifdef ASSERT
3155   int seq = 0;
3156 #endif
3157   // btos
3158   {
3159     assert(btos == seq++, "btos has unexpected value");
3160     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3161     __ bind(Lbtos);
3162     __ pop(btos);
3163     if (!is_static) pop_and_check_object(Robj);
3164     __ access_store_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3165     if (!is_static && rc == may_rewrite) {
3166       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3167     }
3168     __ b(Done);
3169   }
3170 
3171   // ztos
3172   {
3173     assert(ztos == seq++, "ztos has unexpected value");
3174     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3175     __ bind(Lztos);
3176     __ pop(ztos);
3177     if (!is_static) pop_and_check_object(Robj);
3178     __ access_store_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3179     if (!is_static && rc == may_rewrite) {
3180       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3181     }
3182     __ b(Done);
3183   }
3184 
3185   // ctos
3186   {
3187     assert(ctos == seq++, "ctos has unexpected value");
3188     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3189     __ bind(Lctos);
3190     __ pop(ctos);
3191     if (!is_static) pop_and_check_object(Robj);
3192     __ access_store_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3193     if (!is_static && rc == may_rewrite) {
3194       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3195     }
3196     __ b(Done);
3197   }
3198 
3199   // stos
3200   {
3201     assert(stos == seq++, "stos has unexpected value");
3202     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3203     __ bind(Lstos);
3204     __ pop(stos);
3205     if (!is_static) pop_and_check_object(Robj);
3206     __ access_store_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3207     if (!is_static && rc == may_rewrite) {
3208       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3209     }
3210     __ b(Done);
3211   }
3212 
3213   // itos
3214   {
3215     assert(itos == seq++, "itos has unexpected value");
3216     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3217     __ bind(Litos);
3218     __ b(shouldNotReachHere);
3219   }
3220 
3221   // ltos
3222   {
3223     assert(ltos == seq++, "ltos has unexpected value");
3224     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3225     __ bind(Lltos);
3226     __ pop(ltos);
3227     if (!is_static) pop_and_check_object(Robj);
3228     __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false);
3229     if (!is_static && rc == may_rewrite) {
3230       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3231     }
3232     __ b(Done);
3233   }
3234 
3235   // ftos
3236   {
3237     assert(ftos == seq++, "ftos has unexpected value");
3238     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3239     __ bind(Lftos);
3240     // floats and ints are placed on the stack in the same way, so
3241     // we can use pop(itos) to transfer the value without using VFP
3242     __ pop(itos);
3243     if (!is_static) pop_and_check_object(Robj);
3244     __ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3245     if (!is_static && rc == may_rewrite) {
3246       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3247     }
3248     __ b(Done);
3249   }
3250 
3251   // dtos
3252   {
3253     assert(dtos == seq++, "dtos has unexpected value");
3254     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3255     __ bind(Ldtos);
3256     // doubles and longs are placed on the stack in the same way, so
3257     // we can use pop(ltos) to transfer the value without using VFP
3258     __ pop(ltos);
3259     if (!is_static) pop_and_check_object(Robj);
3260     __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false);
3261     if (!is_static && rc == may_rewrite) {
3262       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3263     }
3264     __ b(Done);
3265   }
3266 
3267   // atos
3268   {
3269     assert(atos == seq++, "atos has unexpected value");
3270     __ bind(Latos);
3271     __ pop(atos);
3272     if (!is_static) pop_and_check_object(Robj);
3273     // Store into the field
3274     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
3275     if (!is_static && rc == may_rewrite) {
3276       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3277     }
3278     __ b(Done);
3279   }
3280 
3281   __ bind(shouldNotReachHere);
3282   __ should_not_reach_here();
3283 
3284   // itos case is frequent and is moved outside table switch
3285   __ bind(Lint);
3286   __ pop(itos);
3287   if (!is_static) pop_and_check_object(Robj);
3288   __ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3289   if (!is_static && rc == may_rewrite) {
3290     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3291   }
3292 
3293   __ bind(Done);
3294 
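       // A sketch of the trailing-barrier selection assumed below:
       //  - volatile store: StoreLoad afterwards, so the store cannot be
       //    reordered with a later volatile load;
       //  - final field store (non-static): StoreStore afterwards, so the field
       //    value becomes visible no later than any subsequent publication of
       //    the object reference;
       //  - static final: no trailing barrier needed here, ordering is provided
       //    by class initialization (see the comment in the branch below).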
3295   Label notVolatile2;
3296   if (is_static) {
3297     // Just check for volatile. Memory barrier for static final field
3298     // is handled by class initialization.
3299     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
3300     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3301     __ bind(notVolatile2);
3302   } else {
3303     // Check for volatile field and final field
3304     Label skipMembar;
3305 
3306     __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3307            1 << ConstantPoolCacheEntry::is_final_shift);
3308     __ b(skipMembar, eq);
3309 
3310     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
3311 
3312     // StoreLoad barrier after volatile field write
3313     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3314     __ b(skipMembar);
3315 
3316     // StoreStore barrier after final field write
3317     __ bind(notVolatile2);
3318     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3319 
3320     __ bind(skipMembar);
3321   }
3322 }
3323 
3324 void TemplateTable::putfield(int byte_no) {
3325   putfield_or_static(byte_no, false);
3326 }
3327 
3328 void TemplateTable::nofast_putfield(int byte_no) {
3329   putfield_or_static(byte_no, false, may_not_rewrite);
3330 }
3331 
3332 void TemplateTable::putstatic(int byte_no) {
3333   putfield_or_static(byte_no, true);
3334 }
3335 
3336 
3337 void TemplateTable::jvmti_post_fast_field_mod() {
3338   // This version of jvmti_post_fast_field_mod() is not used on ARM
3339   Unimplemented();
3340 }
3341 
3342 // Blows volatile registers R0-R3, Rtemp, LR,
3343 // but preserves tosca with the given state.
3344 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3345   if (__ can_post_field_modification()) {
3346     // Check to see if a field modification watch has been set before we take
3347     // the time to call into the VM.
3348     Label done;
3349 
3350     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3351     __ cbz(R2, done);
3352 
3353     __ pop_ptr(R3);               // copy the object pointer from tos
3354     __ verify_oop(R3);
3355     __ push_ptr(R3);              // put the object pointer back on tos
3356 
3357     __ push(state);               // save value on the stack
3358 
3359     // access constant pool cache entry
3360     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3361 
3362     __ mov(R1, R3);
3363     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3364     __ mov(R3, Rstack_top); // put tos addr into R3
3365 
3366     // R1: object pointer copied above
3367     // R2: cache entry pointer
3368     // R3: jvalue object on the stack
3369     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3370 
3371     __ pop(state);                // restore value
3372 
3373     __ bind(done);
3374   }
3375 }
3376 
3377 
3378 void TemplateTable::fast_storefield(TosState state) {
3379   transition(state, vtos);
3380 
3381   ByteSize base = ConstantPoolCache::base_offset();
3382 
3383   jvmti_post_fast_field_mod(state);
3384 
3385   const Register Rcache  = R2_tmp;
3386   const Register Rindex  = R3_tmp;
3387   const Register Roffset = R3_tmp;
3388   const Register Rflags  = Rtmp_save0; // R4/R19
3389   const Register Robj    = R5_tmp;
3390 
3391   // access constant pool cache
3392   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3393 
3394   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3395 
3396   // load flags to test volatile
3397   __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3398 
3399   // replace index with field offset from cache entry
3400   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3401 
3402   // Check for volatile store
3403   Label notVolatile;
3404   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3405 
3406   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3407 
3408   __ bind(notVolatile);
3409 
3410   // Get object from stack
3411   pop_and_check_object(Robj);
3412 
3413   Address addr = Address(Robj, Roffset);
3414   // access field
3415   switch (bytecode()) {
3416     case Bytecodes::_fast_zputfield:
3417       __ access_store_at(T_BOOLEAN, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3418       break;
3419     case Bytecodes::_fast_bputfield:
3420       __ access_store_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3421       break;
3422     case Bytecodes::_fast_sputfield:
3423       __ access_store_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3424       break;
3425     case Bytecodes::_fast_cputfield:
3426       __ access_store_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3427       break;
3428     case Bytecodes::_fast_iputfield:
3429       __ access_store_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3430       break;
3431     case Bytecodes::_fast_lputfield:
3432       __ access_store_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3433       break;
3434     case Bytecodes::_fast_fputfield:
3435       __ access_store_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3436       break;
3437     case Bytecodes::_fast_dputfield:
3438       __ access_store_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3439       break;
3440     case Bytecodes::_fast_aputfield:
3441       do_oop_store(_masm, addr, R0_tos, Rtemp, R1_tmp, R2_tmp, false);
3442       break;
3443 
3444     default:
3445       ShouldNotReachHere();
3446   }
3447 
3448   Label notVolatile2;
3449   Label skipMembar;
3450   __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3451          1 << ConstantPoolCacheEntry::is_final_shift);
3452   __ b(skipMembar, eq);
3453 
3454   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
3455 
3456   // StoreLoad barrier after volatile field write
3457   volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3458   __ b(skipMembar);
3459 
3460   // StoreStore barrier after final field write
3461   __ bind(notVolatile2);
3462   volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3463 
3464   __ bind(skipMembar);
3465 }
3466 
3467 void TemplateTable::fast_accessfield(TosState state) {
3468   transition(atos, state);
3469 
3470   // do the JVMTI work here to avoid disturbing the register state below
3471   if (__ can_post_field_access()) {
3472     // Check to see if a field access watch has been set before we take
3473     // the time to call into the VM.
3474     Label done;
3475     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3476     __ cbz(R2, done);
3477     // access constant pool cache entry
3478     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3479     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3480     __ verify_oop(R0_tos);
3481     __ mov(R1, R0_tos);
3482     // R1: object pointer copied above
3483     // R2: cache entry pointer
3484     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3485     __ pop_ptr(R0_tos);   // restore object pointer
3486 
3487     __ bind(done);
3488   }
3489 
3490   const Register Robj    = R0_tos;
3491   const Register Rcache  = R2_tmp;
3492   const Register Rflags  = R2_tmp;
3493   const Register Rindex  = R3_tmp;
3494   const Register Roffset = R3_tmp;
3495 
3496   // access constant pool cache
3497   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3498   // replace index with field offset from cache entry
3499   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3500   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3501 
3502   // load flags to test volatile
3503   __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3504 
3505   __ verify_oop(Robj);
3506   __ null_check(Robj, Rtemp);
3507 
3508   Address addr = Address(Robj, Roffset);
3509   // access field
3510   switch (bytecode()) {
3511     case Bytecodes::_fast_bgetfield:
3512       __ access_load_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3513       break;
3514     case Bytecodes::_fast_sgetfield:
3515       __ access_load_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3516       break;
3517     case Bytecodes::_fast_cgetfield:
3518       __ access_load_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3519       break;
3520     case Bytecodes::_fast_igetfield:
3521       __ access_load_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3522       break;
3523     case Bytecodes::_fast_lgetfield:
3524       __ access_load_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3525       break;
3526     case Bytecodes::_fast_fgetfield:
3527       __ access_load_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3528       break;
3529     case Bytecodes::_fast_dgetfield:
3530       __ access_load_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3531       break;
3532     case Bytecodes::_fast_agetfield:
3533       do_oop_load(_masm, R0_tos, addr);
3534       __ verify_oop(R0_tos);
3535       break;
3536     default:
3537       ShouldNotReachHere();
3538   }
3539 
3540   // Check for volatile load
3541   Label notVolatile;
3542   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3543 
3544   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3545 
3546   __ bind(notVolatile);
3547 }
3548 
3549 
3550 void TemplateTable::fast_xaccess(TosState state) {
3551   transition(vtos, state);
3552 
3553   const Register Robj = R1_tmp;
3554   const Register Rcache = R2_tmp;
3555   const Register Rindex = R3_tmp;
3556   const Register Roffset = R3_tmp;
3557   const Register Rflags = R4_tmp;
3558   Label done;
3559 
3560   // get receiver
3561   __ ldr(Robj, aaddress(0));
3562 
3563   // access constant pool cache
3564   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
3565   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3566   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3567 
3568   // load flags to test volatile
3569   __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3570 
3571   // make sure exception is reported in correct bcp range (getfield is next instruction)
3572   __ add(Rbcp, Rbcp, 1);
3573   __ null_check(Robj, Rtemp);
3574   __ sub(Rbcp, Rbcp, 1);
3575 
3576 
3577   if (state == itos) {
3578     __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
3579   } else if (state == atos) {
3580     do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3581     __ verify_oop(R0_tos);
3582   } else if (state == ftos) {
3583 #ifdef __SOFTFP__
3584     __ ldr(R0_tos, Address(Robj, Roffset));
3585 #else
3586     __ access_load_at(T_FLOAT, IN_HEAP, Address(Robj, Roffset), noreg /* ftos */, noreg, noreg, noreg);
3587 #endif // __SOFTFP__
3588   } else {
3589     ShouldNotReachHere();
3590   }
3591 
3592   // Check for volatile load
3593   Label notVolatile;
3594   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3595 
3596   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3597 
3598   __ bind(notVolatile);
3599 
3600   __ bind(done);
3601 }
3602 
3603 
3604 
3605 //----------------------------------------------------------------------------------------------------
3606 // Calls
3607 
3608 void TemplateTable::count_calls(Register method, Register temp) {
3609   // implemented elsewhere
3610   ShouldNotReachHere();
3611 }
3612 
3613 
3614 void TemplateTable::prepare_invoke(int byte_no,
3615                                    Register method,  // linked method (or i-klass)
3616                                    Register index,   // itable index, MethodType, etc.
3617                                    Register recv,    // if caller wants to see it
3618                                    Register flags    // if caller wants to test it
3619                                    ) {
3620   // determine flags
3621   const Bytecodes::Code code = bytecode();
3622   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3623   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3624   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3625   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3626   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3627   const bool load_receiver       = (recv != noreg);
3628   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3629   assert(recv  == noreg || recv  == R2, "");
3630   assert(flags == noreg || flags == R3, "");
3631 
3632   // setup registers & access constant pool cache
3633   if (recv  == noreg)  recv  = R2;
3634   if (flags == noreg)  flags = R3;
3635   const Register temp = Rtemp;
3636   const Register ret_type = R1_tmp;
3637   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
3638 
3639   // save 'interpreter return address'
3640   __ save_bcp();
3641 
3642   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3643 
3644   // maybe push extra argument
3645   if (is_invokedynamic || is_invokehandle) {
3646     Label L_no_push;
3647     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
3648     __ mov(temp, index);
3649     __ load_resolved_reference_at_index(index, temp);
3650     __ verify_oop(index);
3651     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
3652     __ bind(L_no_push);
3653   }
3654 
3655   // load receiver if needed (after extra argument is pushed so parameter size is correct)
3656   if (load_receiver) {
3657     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
3658     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
3659     __ ldr(recv, recv_addr);
3660     __ verify_oop(recv);
3661   }
3662 
3663   // compute return type
3664   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
3665   // Make sure we don't need to mask flags after the above shift
3666   ConstantPoolCacheEntry::verify_tos_state_shift();
3667   // load return address
3668   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
3669     __ mov_slow(temp, table);
3670     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
3671   }
3672 }
3673 
3674 
3675 void TemplateTable::invokevirtual_helper(Register index,
3676                                          Register recv,
3677                                          Register flags) {
3678 
3679   const Register recv_klass = R2_tmp;
3680 
3681   assert_different_registers(index, recv, flags, Rtemp);
3682   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
3683 
3684   // Test for an invoke of a final method
3685   Label notFinal;
3686   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
3687 
3688   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
3689 
3690   // do the call - the index is actually the method to call
3691 
3692   // It's final, need a null check here!
3693   __ null_check(recv, Rtemp);
3694 
3695   // profile this call
3696   __ profile_final_call(R0_tmp);
3697 
3698   __ jump_from_interpreted(Rmethod);
3699 
3700   __ bind(notFinal);
3701 
3702   // get receiver klass
3703   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
3704   __ load_klass(recv_klass, recv);
3705 
3706   // profile this call
3707   __ profile_virtual_call(R0_tmp, recv_klass);
3708 
3709   // get target Method* & entry point
3710   const int base = in_bytes(Klass::vtable_start_offset());
3711   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
3712   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
3713   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
3714   __ jump_from_interpreted(Rmethod);
3715 }
3716 
3717 void TemplateTable::invokevirtual(int byte_no) {
3718   transition(vtos, vtos);
3719   assert(byte_no == f2_byte, "use this argument");
3720 
3721   const Register Rrecv  = R2_tmp;
3722   const Register Rflags = R3_tmp;
3723 
3724   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
3725 
3726   // Rmethod: index
3727   // Rrecv:   receiver
3728   // Rflags:  flags
3729   // LR:      return address
3730 
3731   invokevirtual_helper(Rmethod, Rrecv, Rflags);
3732 }
3733 
3734 
3735 void TemplateTable::invokespecial(int byte_no) {
3736   transition(vtos, vtos);
3737   assert(byte_no == f1_byte, "use this argument");
3738   const Register Rrecv  = R2_tmp;
3739   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
3740   __ verify_oop(Rrecv);
3741   __ null_check(Rrecv, Rtemp);
3742   // do the call
3743   __ profile_call(Rrecv);
3744   __ jump_from_interpreted(Rmethod);
3745 }
3746 
3747 
3748 void TemplateTable::invokestatic(int byte_no) {
3749   transition(vtos, vtos);
3750   assert(byte_no == f1_byte, "use this argument");
3751   prepare_invoke(byte_no, Rmethod);
3752   // do the call
3753   __ profile_call(R2_tmp);
3754   __ jump_from_interpreted(Rmethod);
3755 }
3756 
3757 
3758 void TemplateTable::fast_invokevfinal(int byte_no) {
3759   transition(vtos, vtos);
3760   assert(byte_no == f2_byte, "use this argument");
3761   __ stop("fast_invokevfinal is not used on ARM");
3762 }
3763 
3764 
3765 void TemplateTable::invokeinterface(int byte_no) {
3766   transition(vtos, vtos);
3767   assert(byte_no == f1_byte, "use this argument");
3768 
3769   const Register Ritable = R1_tmp;
3770   const Register Rrecv   = R2_tmp;
3771   const Register Rinterf = R5_tmp;
3772   const Register Rindex  = R4_tmp;
3773   const Register Rflags  = R3_tmp;
3774   const Register Rklass  = R2_tmp; // Note! Same register with Rrecv
3775 
3776   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
3777 
3778   // First check for Object case, then private interface method,
3779   // then regular interface method.
3780 
3781   // Special case of invokeinterface called for virtual method of
3782   // java.lang.Object.  See cpCache.cpp for details.
3783   Label notObjectMethod;
3784   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
3785   invokevirtual_helper(Rmethod, Rrecv, Rflags);
3786   __ bind(notObjectMethod);
3787 
3788   // Get receiver klass into Rklass - also a null check
3789   __ load_klass(Rklass, Rrecv);
3790 
3791   // Check for private method invocation - indicated by vfinal
3792   Label no_such_interface;
3793 
3794   Label notVFinal;
3795   __ tbz(Rflags, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
3796 
3797   Label subtype;
3798   __ check_klass_subtype(Rklass, Rinterf, R1_tmp, R3_tmp, noreg, subtype);
3799   // If we get here the typecheck failed
3800   __ b(no_such_interface);
3801   __ bind(subtype);
3802 
3803   // do the call
3804   __ profile_final_call(R0_tmp);
3805   __ jump_from_interpreted(Rmethod);
3806 
3807   __ bind(notVFinal);
3808 
3809   // Receiver subtype check against REFC.
3810   __ lookup_interface_method(// inputs: rec. class, interface
3811                              Rklass, Rinterf, noreg,
3812                              // outputs:  scan temp. reg1, scan temp. reg2
3813                              noreg, Ritable, Rtemp,
3814                              no_such_interface);
3815 
3816   // profile this call
3817   __ profile_virtual_call(R0_tmp, Rklass);
3818 
3819   // Get declaring interface class from method
3820   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
3821   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
3822   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
3823 
3824   // Get itable index from method
3825   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
3826   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // Method::itable_index_max is a small negative constant that cannot be encoded as an arm32 immediate
3827   __ neg(Rindex, Rtemp);
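       // Net effect of the add/neg pair above: Rindex = Method::itable_index_max
       // minus the stored value, i.e. the itable slot number (the stored field
       // grows downward from itable_index_max; see Method::itable_index() for the
       // encoding assumed here).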
3828 
3829   __ lookup_interface_method(// inputs: rec. class, interface
3830                              Rklass, Rinterf, Rindex,
3831                              // outputs:  scan temp. reg1, scan temp. reg2
3832                              Rmethod, Ritable, Rtemp,
3833                              no_such_interface);
3834 
3835   // Rmethod: Method* to call
3836 
3837   // Check for abstract method error
3838   // Note: This should be done more efficiently via a throw_abstract_method_error
3839   //       interpreter entry point and a conditional jump to it in case of a null
3840   //       method.
3841   { Label L;
3842     __ cbnz(Rmethod, L);
3843     // throw exception
3844     // note: must restore interpreter registers to canonical
3845     //       state for exception handling to work correctly!
3846     __ restore_method();
3847     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3848     // the call_VM checks for exception, so we should never return here.
3849     __ should_not_reach_here();
3850     __ bind(L);
3851   }
3852 
3853   // do the call
3854   __ jump_from_interpreted(Rmethod);
3855 
3856   // throw exception
3857   __ bind(no_such_interface);
3858   __ restore_method();
3859   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3860   // the call_VM checks for exception, so we should never return here.
3861   __ should_not_reach_here();
3862 }
3863 
3864 void TemplateTable::invokehandle(int byte_no) {
3865   transition(vtos, vtos);
3866 
3867   const Register Rrecv  = R2_tmp;
3868   const Register Rmtype = R4_tmp;
3869   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
3870 
3871   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
3872   __ null_check(Rrecv, Rtemp);
3873 
3874   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
3875   // Rmethod: MH.invokeExact_MT method (from f2)
3876 
3877   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
3878 
3879   // do the call
3880   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
3881   __ mov(Rmethod, R5_method);
3882   __ jump_from_interpreted(Rmethod);
3883 }
3884 
3885 void TemplateTable::invokedynamic(int byte_no) {
3886   transition(vtos, vtos);
3887 
3888   const Register Rcallsite = R4_tmp;
3889   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
3890 
3891   prepare_invoke(byte_no, R5_method, Rcallsite);
3892 
3893   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
3894   // Rmethod:   MH.linkToCallSite method (from f2)
3895 
3896   // Note:  Rcallsite is already pushed by prepare_invoke
3897 
3898   if (ProfileInterpreter) {
3899     __ profile_call(R2_tmp);
3900   }
3901 
3902   // do the call
3903   __ mov(Rmethod, R5_method);
3904   __ jump_from_interpreted(Rmethod);
3905 }
3906 
3907 //----------------------------------------------------------------------------------------------------
3908 // Allocation
3909 
3910 void TemplateTable::_new() {
3911   transition(vtos, atos);
3912 
3913   const Register Robj   = R0_tos;
3914   const Register Rcpool = R1_tmp;
3915   const Register Rindex = R2_tmp;
3916   const Register Rtags  = R3_tmp;
3917   const Register Rsize  = R3_tmp;
3918 
3919   Register Rklass = R4_tmp;
3920   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
3921   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
3922 
3923   Label slow_case;
3924   Label done;
3925   Label initialize_header;
3926   Label initialize_object;  // including clearing the fields
3927 
3928   const bool allow_shared_alloc =
3929     Universe::heap()->supports_inline_contig_alloc();
3930 
3931   // Literals
3932   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
3933 
3934   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
3935   __ get_cpool_and_tags(Rcpool, Rtags);
3936 
3937   // Make sure the class we're about to instantiate has been resolved.
3938   // This is done before loading InstanceKlass to be consistent with the order
3939   // how Constant Pool is updated (see ConstantPool::klass_at_put)
3940   const int tags_offset = Array<u1>::base_offset_in_bytes();
3941   __ add(Rtemp, Rtags, Rindex);
3942 
3943   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
3944 
3945   // use Rklass as a scratch
3946   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
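       // The LoadLoad barrier above keeps the tag load ordered before the
       // resolved-klass load below; the intent (a sketch of the pairing) is to
       // match ConstantPool::klass_at_put, which stores the resolved klass
       // before marking the tag as JVM_CONSTANT_Class.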
3947 
3948   // get InstanceKlass
3949   __ cmp(Rtemp, JVM_CONSTANT_Class);
3950   __ b(slow_case, ne);
3951   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
3952 
3953   // make sure klass is initialized & doesn't have finalizer
3954   // make sure klass is fully initialized
3955   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
3956   __ cmp(Rtemp, InstanceKlass::fully_initialized);
3957   __ b(slow_case, ne);
3958 
3959   // get instance_size in InstanceKlass (scaled to a count of bytes)
3960   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
3961 
3962   // test to see if it has a finalizer or is malformed in some way
3963   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
3964   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
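       // Note on the encoding assumed above: for instance klasses the layout
       // helper is the instance size in bytes (suitably aligned), with
       // _lh_instance_slow_path_bit or'ed in when slow-path allocation is
       // required (e.g. the class has a finalizer). Once the tbnz falls
       // through, the bit is clear and Rsize is the exact allocation size.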
3965 
3966   // Allocate the instance:
3967   //  If TLAB is enabled:
3968   //    Try to allocate in the TLAB.
3969   //    If fails, go to the slow path.
3970   //  Else If inline contiguous allocations are enabled:
3971   //    Try to allocate in eden.
3972   //    If fails due to heap end, go to slow path.
3973   //
3974   //  If TLAB is enabled OR inline contiguous is enabled:
3975   //    Initialize the allocation.
3976   //    Exit.
3977   //
3978   //  Go to slow path.
3979   if (UseTLAB) {
3980     const Register Rtlab_top = R1_tmp;
3981     const Register Rtlab_end = R2_tmp;
3982     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
3983 
3984     __ tlab_allocate(Robj, Rtlab_top, Rtlab_end, Rsize, slow_case);
3985     if (ZeroTLAB) {
3986       // the fields have been already cleared
3987       __ b(initialize_header);
3988     } else {
3989       // initialize both the header and fields
3990       __ b(initialize_object);
3991     }
3992   } else {
3993     // Allocation in the shared Eden, if allowed.
3994     if (allow_shared_alloc) {
3995       const Register Rheap_top_addr = R2_tmp;
3996       const Register Rheap_top = R5_tmp;
3997       const Register Rheap_end = Rtemp;
3998       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
3999 
4000       __ eden_allocate(Robj, Rheap_top, Rheap_top_addr, Rheap_end, Rsize, slow_case);
4001     }
4002   }
4003 
4004   if (UseTLAB || allow_shared_alloc) {
4005     const Register Rzero0 = R1_tmp;
4006     const Register Rzero1 = R2_tmp;
4007     const Register Rzero_end = R5_tmp;
4008     const Register Rzero_cur = Rtemp;
4009     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4010 
4011     // The object is initialized before the header.  If the object size is
4012     // zero, go directly to the header initialization.
4013     __ bind(initialize_object);
4014     __ subs(Rsize, Rsize, sizeof(oopDesc));
4015     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4016     __ b(initialize_header, eq);
4017 
4018 #ifdef ASSERT
4019     // make sure Rsize is a multiple of 8
4020     Label L;
4021     __ tst(Rsize, 0x07);
4022     __ b(L, eq);
4023     __ stop("object size is not multiple of 8 - adjust this code");
4024     __ bind(L);
4025 #endif
4026 
4027     __ mov(Rzero0, 0);
4028     __ mov(Rzero1, 0);
4029     __ add(Rzero_end, Rzero_cur, Rsize);
4030 
4031     // initialize remaining object fields: Rsize was a multiple of 8
4032     { Label loop;
4033       // loop is unrolled 2 times
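           // Rough C-style sketch of the unrolled zeroing loop below (Rsize is a
           // non-zero multiple of 8 here; cur/end are 4-byte word pointers):
           //   do {
           //     *cur++ = 0; *cur++ = 0;        // stmia #1: 8 bytes
           //     if (cur == end) break;
           //     *cur++ = 0; *cur++ = 0;        // stmia #2: 8 more bytes
           //   } while (cur != end);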
4034       __ bind(loop);
4035       // #1
4036       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4037       __ cmp(Rzero_cur, Rzero_end);
4038       // #2
4039       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4040       __ cmp(Rzero_cur, Rzero_end, ne);
4041       __ b(loop, ne);
4042     }
4043 
4044     // initialize object header only.
4045     __ bind(initialize_header);
4046     if (UseBiasedLocking) {
4047       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4048     } else {
4049       __ mov_slow(Rtemp, (intptr_t)markWord::prototype().value());
4050     }
4051     // mark
4052     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4053 
4054     // klass
4055     __ store_klass(Rklass, Robj); // blows Rklass:
4056     Rklass = noreg;
4057 
4058     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4059     if (DTraceAllocProbes) {
4060       // Trigger dtrace event for fastpath
4061       Label Lcontinue;
4062 
4063       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4064       __ cbz(Rtemp, Lcontinue);
4065 
4066       __ push(atos);
4067       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4068       __ pop(atos);
4069 
4070       __ bind(Lcontinue);
4071     }
4072 
4073     __ b(done);
4074   } else {
4075     // jump over literals
4076     __ b(slow_case);
4077   }
4078 
4079   if (allow_shared_alloc) {
4080     __ bind_literal(Lheap_top_addr);
4081   }
4082 
4083   // slow case
4084   __ bind(slow_case);
4085   __ get_constant_pool(Rcpool);
4086   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4087   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4088 
4089   // continue
4090   __ bind(done);
4091 
4092   // StoreStore barrier required after complete initialization
4093   // (headers + content zeroing), before the object may escape.
4094   __ membar(MacroAssembler::StoreStore, R1_tmp);
4095 }
4096 
4097 
4098 void TemplateTable::newarray() {
4099   transition(itos, atos);
4100   __ ldrb(R1, at_bcp(1));
4101   __ mov(R2, R0_tos);
4102   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4103   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4104 }
4105 
4106 
4107 void TemplateTable::anewarray() {
4108   transition(itos, atos);
4109   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4110   __ get_constant_pool(R1);
4111   __ mov(R3, R0_tos);
4112   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4113   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4114 }
4115 
4116 
4117 void TemplateTable::arraylength() {
4118   transition(atos, itos);
4119   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4120   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4121 }
4122 
4123 
4124 void TemplateTable::checkcast() {
4125   transition(atos, atos);
4126   Label done, is_null, quicked, resolved, throw_exception;
4127 
4128   const Register Robj = R0_tos;
4129   const Register Rcpool = R2_tmp;
4130   const Register Rtags = R3_tmp;
4131   const Register Rindex = R4_tmp;
4132   const Register Rsuper = R3_tmp;
4133   const Register Rsub   = R4_tmp;
4134   const Register Rsubtype_check_tmp1 = R1_tmp;
4135   const Register Rsubtype_check_tmp2 = LR_tmp;
4136 
4137   __ cbz(Robj, is_null);
4138 
4139   // Get cpool & tags index
4140   __ get_cpool_and_tags(Rcpool, Rtags);
4141   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4142 
4143   // See if bytecode has already been quicked
4144   __ add(Rtemp, Rtags, Rindex);
4145   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4146 
4147   __ cmp(Rtemp, JVM_CONSTANT_Class);
4148 
4149   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4150 
4151   __ b(quicked, eq);
4152 
4153   __ push(atos);
4154   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4155   // vm_result_2 has metadata result
4156   __ get_vm_result_2(Rsuper, Robj);
4157   __ pop_ptr(Robj);
4158   __ b(resolved);
4159 
4160   __ bind(throw_exception);
4161   // Come here on failure of subtype check
4162   __ profile_typecheck_failed(R1_tmp);
4163   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4164   __ b(Interpreter::_throw_ClassCastException_entry);
4165 
4166   // Get superklass in Rsuper and subklass in Rsub
4167   __ bind(quicked);
4168   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4169 
4170   __ bind(resolved);
4171   __ load_klass(Rsub, Robj);
4172 
4173   // Generate subtype check. Blows both tmps and Rtemp.
4174   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4175   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4176 
4177   // Come here on success
4178 
4179   // Collect counts on whether this check-cast sees NULLs a lot or not.
4180   if (ProfileInterpreter) {
4181     __ b(done);
4182     __ bind(is_null);
4183     __ profile_null_seen(R1_tmp);
4184   } else {
4185     __ bind(is_null);   // same as 'done'
4186   }
4187   __ bind(done);
4188 }
4189 
4190 
4191 void TemplateTable::instanceof() {
4192   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4193   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4194 
4195   transition(atos, itos);
4196   Label done, is_null, not_subtype, quicked, resolved;
4197 
4198   const Register Robj = R0_tos;
4199   const Register Rcpool = R2_tmp;
4200   const Register Rtags = R3_tmp;
4201   const Register Rindex = R4_tmp;
4202   const Register Rsuper = R3_tmp;
4203   const Register Rsub   = R4_tmp;
4204   const Register Rsubtype_check_tmp1 = R0_tmp;
4205   const Register Rsubtype_check_tmp2 = R1_tmp;
4206 
4207   __ cbz(Robj, is_null);
4208 
4209   __ load_klass(Rsub, Robj);
4210 
4211   // Get cpool & tags index
4212   __ get_cpool_and_tags(Rcpool, Rtags);
4213   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4214 
4215   // See if bytecode has already been quicked
4216   __ add(Rtemp, Rtags, Rindex);
4217   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4218   __ cmp(Rtemp, JVM_CONSTANT_Class);
4219 
4220   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4221 
4222   __ b(quicked, eq);
4223 
4224   __ push(atos);
4225   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4226   // vm_result_2 has metadata result
4227   __ get_vm_result_2(Rsuper, Robj);
4228   __ pop_ptr(Robj);
4229   __ b(resolved);
4230 
4231   // Get superklass in Rsuper and subklass in Rsub
4232   __ bind(quicked);
4233   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4234 
4235   __ bind(resolved);
4236   __ load_klass(Rsub, Robj);
4237 
4238   // Generate subtype check. Blows both tmps and Rtemp.
4239   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4240 
4241   // Come here on success
4242   __ mov(R0_tos, 1);
4243   __ b(done);
4244 
4245   __ bind(not_subtype);
4246   // Come here on failure
4247   __ profile_typecheck_failed(R1_tmp);
4248   __ mov(R0_tos, 0);
4249 
4250   // Collect counts on whether this test sees NULLs a lot or not.
4251   if (ProfileInterpreter) {
4252     __ b(done);
4253     __ bind(is_null);
4254     __ profile_null_seen(R1_tmp);
4255   } else {
4256     __ bind(is_null);   // same as 'done'
4257   }
4258   __ bind(done);
4259 }
4260 
4261 
4262 //----------------------------------------------------------------------------------------------------
4263 // Breakpoints
4264 void TemplateTable::_breakpoint() {
4265 
4266   // Note: We get here even if we are single stepping.
4267   // jbug insists on setting breakpoints at every bytecode
4268   // even if we are in single step mode.
4269 
4270   transition(vtos, vtos);
4271 
4272   // get the unpatched byte code
4273   __ mov(R1, Rmethod);
4274   __ mov(R2, Rbcp);
4275   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
4276   __ mov(Rtmp_save0, R0);
4277 
4278   // post the breakpoint event
4279   __ mov(R1, Rmethod);
4280   __ mov(R2, Rbcp);
4281   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4282 
4283   // complete the execution of original bytecode
4284   __ mov(R3_bytecode, Rtmp_save0);
4285   __ dispatch_only_normal(vtos);
4286 }
4287 
4288 
4289 //----------------------------------------------------------------------------------------------------
4290 // Exceptions
4291 
4292 void TemplateTable::athrow() {
4293   transition(atos, vtos);
4294   __ mov(Rexception_obj, R0_tos);
4295   __ null_check(Rexception_obj, Rtemp);
4296   __ b(Interpreter::throw_exception_entry());
4297 }
4298 
4299 
4300 //----------------------------------------------------------------------------------------------------
4301 // Synchronization
4302 //
4303 // Note: monitorenter & exit are symmetric routines; which is reflected
4304 //       in the assembly code structure as well
4305 //
4306 // Stack layout:
4307 //
4308 // [expressions  ] <--- Rstack_top        = expression stack top
4309 // ..
4310 // [expressions  ]
4311 // [monitor entry] <--- monitor block top = expression stack bot
4312 // ..
4313 // [monitor entry]
4314 // [frame data   ] <--- monitor block bot
4315 // ...
4316 // [saved FP     ] <--- FP
4317 
4318 
4319 void TemplateTable::monitorenter() {
4320   transition(atos, vtos);
4321 
4322   const Register Robj = R0_tos;
4323   const Register Rentry = R1_tmp;
4324 
4325   // check for NULL object
4326   __ null_check(Robj, Rtemp);
4327 
4328   __ resolve(IS_NOT_NULL, Robj);
4329 
4330   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4331   assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4332   Label allocate_monitor, allocated;
4333 
4334   // initialize entry pointer
4335   __ mov(Rentry, 0);                             // points to free slot or NULL
4336 
4337   // find a free slot in the monitor block (result in Rentry)
4338   { Label loop, exit;
4339     const Register Rcur = R2_tmp;
4340     const Register Rcur_obj = Rtemp;
4341     const Register Rbottom = R3_tmp;
4342     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
4343 
4344     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4345                                  // points to current entry, starting with top-most entry
4346     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4347                                  // points to word before bottom of monitor block
4348 
4349     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4350     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4351                                                  // prefetch monitor's object for the first iteration
4352     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4353 
4354     __ bind(loop);
4355     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4356     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4357 
4358     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4359     __ b(exit, eq);                              // if same object then stop searching
4360 
4361     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4362 
4363     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4364     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4365                                                  // prefetch monitor's object for the next iteration
4366     __ b(loop, ne);                              // if not at bottom then check this entry
4367     __ bind(exit);
4368   }
4369 
4370   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4371 
4372   __ bind(allocate_monitor);
4373 
4374   // allocate one if there's no free slot
4375   { Label loop;
4376     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4377 
4378     // 1. compute new pointers
4379 
4380 
4381     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4382                                                  // old monitor block top / expression stack bottom
4383 
4384     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
4385     __ check_stack_top_on_expansion();
4386 
4387     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
4388 
4389     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
4390 
4391     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4392                                                  // set new monitor block top
4393 
4394     // 2. move expression stack contents
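         // Sketch of the copy below: every expression stack word moves down by
         // entry_size bytes, walking from the new stack top toward the new
         // monitor block top, roughly:
         //   for (p = new_stack_top; p != new_monitor_top; p += wordSize)
         //     *p = *(p + entry_size);
         // (copying in increasing address order is safe because the source is
         // entry_size above the destination)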
4395 
4396     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
4397     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4398     __ b(allocated, eq);
4399 
4400     __ bind(loop);
4401     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
4402                                                             // and advance to next word
4403     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
4404     __ ldr(Rtemp, Address(R2, entry_size), ne);             // load expression stack word from old location
4405     __ b(loop, ne);                                         // if not at bottom then copy next word
4406   }
4407 
4408   // call run-time routine
4409 
4410   // Rentry: points to monitor entry
4411   __ bind(allocated);
4412 
4413   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly.
4414   // The object has already been popped from the stack, so the expression stack looks correct.
4415   __ add(Rbcp, Rbcp, 1);
4416 
4417   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
4418   __ lock_object(Rentry);
4419 
4420   // check to make sure this monitor doesn't cause stack overflow after locking
4421   __ save_bcp();  // in case of exception
4422   __ arm_stack_overflow_check(0, Rtemp);
4423 
4424   // The bcp has already been incremented. Just need to dispatch to next instruction.
4425   __ dispatch_next(vtos);
4426 }
4427 
4428 
4429 void TemplateTable::monitorexit() {
4430   transition(atos, vtos);
4431 
4432   const Register Robj = R0_tos;
4433   const Register Rcur = R1_tmp;
4434   const Register Rbottom = R2_tmp;
4435   const Register Rcur_obj = Rtemp;
4436 
4437   // check for NULL object
4438   __ null_check(Robj, Rtemp);
4439 
4440   __ resolve(IS_NOT_NULL, Robj);
4441 
4442   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4443   Label found, throw_exception;
4444 
4445   // find matching slot
4446   { Label loop;
4447     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
4448 
4449     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4450                                  // points to current entry, starting with top-most entry
4451     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4452                                  // points to word before bottom of monitor block
4453 
4454     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4455     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4456                                                  // prefetch monitor's object for the first iteration
4457     __ b(throw_exception, eq);                   // throw exception if there are no monitors
4458 
4459     __ bind(loop);
4460     // check if current entry is for same object
4461     __ cmp(Rcur_obj, Robj);
4462     __ b(found, eq);                             // if same object then stop searching
4463     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4464     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4465     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4466     __ b (loop, ne);                             // if not at bottom then check this entry
4467   }
4468 
4469   // error handling. Unlocking was not block-structured
4470   __ bind(throw_exception);
4471   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4472   __ should_not_reach_here();
4473 
4474   // call run-time routine
4475   // Rcur: points to monitor entry
4476   __ bind(found);
4477   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
4478   __ unlock_object(Rcur);
4479   __ pop_ptr(Robj);                              // discard object
4480 }
4481 
4482 
4483 //----------------------------------------------------------------------------------------------------
4484 // Wide instructions
4485 
4486 void TemplateTable::wide() {
4487   transition(vtos, vtos);
4488   __ ldrb(R3_bytecode, at_bcp(1));
4489 
4490   InlinedAddress Ltable((address)Interpreter::_wentry_point);
4491   __ ldr_literal(Rtemp, Ltable);
4492   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
4493 
4494   __ nop(); // to avoid filling CPU pipeline with invalid instructions
4495   __ nop();
4496   __ bind_literal(Ltable);
4497 }
4498 
4499 
4500 //----------------------------------------------------------------------------------------------------
4501 // Multi arrays
4502 
4503 void TemplateTable::multianewarray() {
4504   transition(vtos, atos);
4505   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
4506 
4507   // last dim is on top of stack; we want the address of the first one:
4508   //   first_addr = last_addr + ndims * stackElementSize - 1 * wordSize
4509   // the trailing wordSize is subtracted so R1 points at the first dimension word.
4510   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4511   __ sub(R1, Rtemp, wordSize);
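       // Worked example of the computation above (hypothetical values, assuming
       // 4-byte stack elements): with ndims = 3 the dimension words occupy
       // [Rstack_top, Rstack_top + 12); Rtemp = Rstack_top + 12 and
       // R1 = Rstack_top + 8, the address of the first dimension word, which is
       // what the runtime call below expects.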
4512 
4513   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
4514   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4515   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4516 }