src/hotspot/cpu/aarch64/templateTable_aarch64.cpp

 130   case TemplateTable::equal        : return Assembler::NE;
 131   case TemplateTable::not_equal    : return Assembler::EQ;
 132   case TemplateTable::less         : return Assembler::GE;
 133   case TemplateTable::less_equal   : return Assembler::GT;
 134   case TemplateTable::greater      : return Assembler::LE;
 135   case TemplateTable::greater_equal: return Assembler::LT;
 136   }
 137   ShouldNotReachHere();
 138   return Assembler::EQ;
 139 }
 140 
 141 
 142 // Miscellaneous helper routines
 143 // Store an oop (or NULL) at the Address described by obj.
 144 // If val == noreg this means store a NULL
 145 static void do_oop_store(InterpreterMacroAssembler* _masm,
 146                          Address dst,
 147                          Register val,
 148                          DecoratorSet decorators) {
 149   assert(val == noreg || val == r0, "parameter is just for looks");
 150   __ store_heap_oop(dst, val, r10, r1, decorators);
 151 }
 152 
 153 static void do_oop_load(InterpreterMacroAssembler* _masm,
 154                         Address src,
 155                         Register dst,
 156                         DecoratorSet decorators) {
 157   __ load_heap_oop(dst, src, r10, r1, decorators);
 158 }
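
These two wrappers fix the temporary registers (r10, r1) and forward the decorator set to the barrier-aware MacroAssembler accessors; callers elsewhere in this file use them like the following lines (taken from aastore and getfield below):

    do_oop_store(_masm, element_address, r0, IS_ARRAY);     // store the oop in r0 into an array slot
    do_oop_store(_masm, element_address, noreg, IS_ARRAY);   // val == noreg: store a NULL
    do_oop_load(_masm, field, r0, IN_HEAP);                  // load an instance field into r0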
 159 
 160 Address TemplateTable::at_bcp(int offset) {
 161   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 162   return Address(rbcp, offset);
 163 }
 164 
 165 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 166                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 167                                    int byte_no)
 168 {
 169   if (!RewriteBytecodes)  return;
 170   Label L_patch_done;
 171 
 172   switch (bc) {

 173   case Bytecodes::_fast_aputfield:
 174   case Bytecodes::_fast_bputfield:
 175   case Bytecodes::_fast_zputfield:
 176   case Bytecodes::_fast_cputfield:
 177   case Bytecodes::_fast_dputfield:
 178   case Bytecodes::_fast_fputfield:
 179   case Bytecodes::_fast_iputfield:
 180   case Bytecodes::_fast_lputfield:
 181   case Bytecodes::_fast_sputfield:
 182     {
 183       // We skip bytecode quickening for putfield instructions when
 184       // the put_code written to the constant pool cache is zero.
 185       // This is required so that every execution of this instruction
 186       // calls out to InterpreterRuntime::resolve_get_put to do
 187       // additional, required work.
 188       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 189       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 190       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
 191       __ movw(bc_reg, bc);
 192       __ cbzw(temp_reg, L_patch_done);  // don't patch


 728 }
 729 
 730 void TemplateTable::index_check(Register array, Register index)
 731 {
 732   // destroys r1, rscratch1
 733   // check array
 734   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
 735   // sign extend index for use by indexed load
 736   // __ movl2ptr(index, index);
 737   // check index
 738   Register length = rscratch1;
 739   __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
 740   __ cmpw(index, length);
 741   if (index != r1) {
 742     // ??? convention: move aberrant index into r1 for exception message
 743     assert(r1 != array, "different registers");
 744     __ mov(r1, index);
 745   }
 746   Label ok;
 747   __ br(Assembler::LO, ok);
 748     // ??? convention: move array into r3 for exception message
 749   __ mov(r3, array);
 750   __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
 751   __ br(rscratch1);
 752   __ bind(ok);
 753 }
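
In C-level terms the sequence above is an unsigned bounds check; a sketch (not generated code, and the r1/r3 moves happen only for the exception path):

    uint32_t length = array->length();        // ldrw length, [array, length_offset]
    if ((uint32_t)index >= length) {          // cmpw + br(Assembler::LO, ok) is an unsigned compare
      // convention: index in r1, array in r3 for the exception message
      goto Interpreter::_throw_ArrayIndexOutOfBoundsException_entry;
    }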
 754 
 755 void TemplateTable::iaload()
 756 {
 757   transition(itos, itos);
 758   __ mov(r1, r0);
 759   __ pop_ptr(r0);
 760   // r0: array
 761   // r1: index
 762   index_check(r0, r1); // leaves index in r1, kills rscratch1
 763   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
 764   __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
 765 }
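
The add/scaled-index pair folds the array header into the index before the access. Assuming a 16-byte T_INT array header (a typical 64-bit value of arrayOopDesc::base_offset_in_bytes(T_INT), used here only for illustration) the effective address works out as:

    // r1      = index + (16 >> 2) = index + 4      (the add above)
    // address = r0 + (r1 << 2)                     (uxtw(2) scaled addressing)
    //         = array + 16 + index * 4             == &array->int_at(index)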
 766 
 767 void TemplateTable::laload()
 768 {
 769   transition(itos, ltos);
 770   __ mov(r1, r0);
 771   __ pop_ptr(r0);


 791 void TemplateTable::daload()
 792 {
 793   transition(itos, dtos);
 794   __ mov(r1, r0);
 795   __ pop_ptr(r0);
 796   // r0: array
 797   // r1: index
 798   index_check(r0, r1); // leaves index in r1, kills rscratch1
 799   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
 800   __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
 801 }
 802 
 803 void TemplateTable::aaload()
 804 {
 805   transition(itos, atos);
 806   __ mov(r1, r0);
 807   __ pop_ptr(r0);
 808   // r0: array
 809   // r1: index
 810   index_check(r0, r1); // leaves index in r1, kills rscratch1
 811   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 812   do_oop_load(_masm,
 813               Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
 814               r0,
 815               IS_ARRAY);

 816 }
 817 
 818 void TemplateTable::baload()
 819 {
 820   transition(itos, itos);
 821   __ mov(r1, r0);
 822   __ pop_ptr(r0);
 823   // r0: array
 824   // r1: index
 825   index_check(r0, r1); // leaves index in r1, kills rscratch1
 826   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
 827   __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
 828 }
 829 
 830 void TemplateTable::caload()
 831 {
 832   transition(itos, itos);
 833   __ mov(r1, r0);
 834   __ pop_ptr(r0);
 835   // r0: array


1085   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
1086   __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg);
1087 }
1088 
1089 void TemplateTable::dastore() {
1090   transition(dtos, vtos);
1091   __ pop_i(r1);
1092   __ pop_ptr(r3);
1093   // v0: value
1094   // r1:  index
1095   // r3:  array
1096   index_check(r3, r1); // prefer index in r1
1097   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
1098   __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
1099 }
1100 
1101 void TemplateTable::aastore() {
1102   Label is_null, ok_is_subtype, done;
1103   transition(vtos, vtos);
1104   // stack: ..., array, index, value
1105   __ ldr(r0, at_tos());    // value
1106   __ ldr(r2, at_tos_p1()); // index
1107   __ ldr(r3, at_tos_p2()); // array
1108 
1109   Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
1110 
1111   index_check(r3, r2);     // kills r1
1112   __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);


1113 
1114   // do array store check - check for NULL value first
1115   __ cbz(r0, is_null);
1116 
1117   // Move subklass into r1
1118   __ load_klass(r1, r0);

1119   // Move superklass into r0
1120   __ load_klass(r0, r3);
1121   __ ldr(r0, Address(r0,
1122                      ObjArrayKlass::element_klass_offset()));
1123   // Compress array + index*oopSize + 12 into a single register.  Frees r2.
1124 
1125   // Generate subtype check.  Blows r2, r5
1126   // Superklass in r0.  Subklass in r1.

1127   __ gen_subtype_check(r1, ok_is_subtype);
1128 
1129   // Come here on failure
1130   // object is at TOS
1131   __ b(Interpreter::_throw_ArrayStoreException_entry);
1132 

1133   // Come here on success
1134   __ bind(ok_is_subtype);
1135 

1136   // Get the value we will store
1137   __ ldr(r0, at_tos());
1138   // Now store using the appropriate barrier
1139   do_oop_store(_masm, element_address, r0, IS_ARRAY);
1140   __ b(done);
1141 
1142   // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
1143   __ bind(is_null);
1144   __ profile_null_seen(r2);
1145 
1146   // Store a NULL
1147   do_oop_store(_masm, element_address, noreg, IS_ARRAY);
1148 
1149   // Pop stack arguments
1150   __ bind(done);
1151   __ add(esp, esp, 3 * Interpreter::stackElementSize);
1152 }
1153 
1154 void TemplateTable::bastore()
1155 {
1156   transition(itos, vtos);
1157   __ pop_i(r1);
1158   __ pop_ptr(r3);
1159   // r0: value
1160   // r1: index
1161   // r3: array
1162   index_check(r3, r1); // prefer index in r1
1163 
1164   // Need to check whether array is boolean or byte
1165   // since both types share the bastore bytecode.
1166   __ load_klass(r2, r3);
1167   __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));


2004   __ br(j_not(cc), not_taken);
2005   branch(false, false);
2006   __ bind(not_taken);
2007   __ profile_not_taken_branch(r0);
2008 }
2009 
2010 void TemplateTable::if_nullcmp(Condition cc)
2011 {
2012   transition(atos, vtos);
2013   // assume branch is more often taken than not (loops use backward branches)
2014   Label not_taken;
2015   if (cc == equal)
2016     __ cbnz(r0, not_taken);
2017   else
2018     __ cbz(r0, not_taken);
2019   branch(false, false);
2020   __ bind(not_taken);
2021   __ profile_not_taken_branch(r0);
2022 }
2023 
2024 void TemplateTable::if_acmp(Condition cc)
2025 {
2026   transition(atos, vtos);
2027   // assume branch is more often taken than not (loops use backward branches)
2028   Label not_taken;
2029   __ pop_ptr(r1);
2030   __ cmpoop(r1, r0);
2031   __ br(j_not(cc), not_taken);

2032   branch(false, false);
2033   __ bind(not_taken);
2034   __ profile_not_taken_branch(r0);
2035 }
2036 
2037 void TemplateTable::ret() {
2038   transition(vtos, vtos);
2039   // We might be moving to a safepoint.  The thread which calls
2040   // Interpreter::notice_safepoints() will effectively flush its cache
2041   // when it makes a system call, but we need to do something to
2042   // ensure that we see the changed dispatch table.
2043   __ membar(MacroAssembler::LoadLoad);
2044 
2045   locals_index(r1);
2046   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
2047   __ profile_ret(r1, r2);
2048   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
2049   __ lea(rbcp, Address(rbcp, r1));
2050   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
2051   __ dispatch_next(vtos, 0, /*generate_poll*/true);
2052 }
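
The three instructions after profile_ret recompute the return bcp from the saved bci; as a sketch in C++ terms (rmethod holds the current Method*):

    // rbcp = (address) rmethod->constMethod()          // ldr  rbcp, [rmethod, Method::const_offset()]
    //        + return_bci                              // lea  rbcp, [rbcp, r1]
    //        + in_bytes(ConstMethod::codes_offset());  // add  rbcp, rbcp, codes_offset
    // rbcp now points at the bytecode to resume at, ready for dispatch_next.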
2053 
2054 void TemplateTable::wide_ret() {
2055   transition(vtos, vtos);
2056   locals_index_wide(r1);


2266     __ tbz(r3, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2267 
2268     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2269 
2270     __ bind(skip_register_finalizer);
2271   }
2272 
2273   // Issue a StoreStore barrier after all stores but before return
2274   // from any constructor for any class with a final field.  We don't
2275   // know if this is a finalizer, so we always do so.
2276   if (_desc->bytecode() == Bytecodes::_return)
2277     __ membar(MacroAssembler::StoreStore);
2278 
2279   // Narrow result if state is itos but result type is smaller.
2280   // Need to narrow in the return bytecode rather than in generate_return_entry
2281   // since compiled code callers expect the result to already be narrowed.
2282   if (state == itos) {
2283     __ narrow(r0);
2284   }
2285 
2286   __ remove_activation(state);
2287   __ ret(lr);
2288 }
2289 
2290 // ----------------------------------------------------------------------------
2291 // Volatile variables demand their effects be made known to all CPU's
2292 // in order.  Store buffers on most chips allow reads & writes to
2293 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2294 // without some kind of memory barrier (i.e., it's not sufficient that
2295 // the interpreter does not reorder volatile references, the hardware
2296 // also must not reorder them).
2297 //
2298 // According to the new Java Memory Model (JMM):
2299 // (1) All volatiles are serialized wrt to each other.  ALSO reads &
 2300 //     writes act as acquire & release, so:
2301 // (2) A read cannot let unrelated NON-volatile memory refs that
2302 //     happen after the read float up to before the read.  It's OK for
2303 //     non-volatile memory refs that happen before the volatile read to
2304 //     float down below it.
 2305 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2306 //     memory refs that happen BEFORE the write float down to after the


2480   // 8179954: We need to make sure that the code generated for
2481   // volatile accesses forms a sequentially-consistent set of
2482   // operations when combined with STLR and LDAR.  Without a leading
2483   // membar it's possible for a simple Dekker test to fail if loads
2484   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
2485   // the stores in one method and we interpret the loads in another.
2486   if (! UseBarriersForVolatile) {
2487     Label notVolatile;
2488     __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2489     __ membar(MacroAssembler::AnyAny);
2490     __ bind(notVolatile);
2491   }
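
The mismatch described in the comment can be sketched as two code shapes (illustrative only, not a quote of generated code): the compiled volatile store is a store-release, the interpreted volatile load is a plain load with a trailing barrier, and only a leading full barrier on the load side makes the pair sequentially consistent for the Dekker case.

    //   C2-compiled store side:          stlr  w1, [x0]               // store-release
    //   interpreted load side:           ldr   w1, [x0] ; dmb ...     // plain load + trailing barrier
    //   with the leading membar (here):  dmb ish ; ldr  w1, [x0] ...  // restores the Dekker guarantee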
2492 
2493   const Address field(obj, off);
2494 
2495   Label Done, notByte, notBool, notInt, notShort, notChar,
2496               notLong, notFloat, notObj, notDouble;
2497 
2498   // x86 uses a shift and mask or wings it with a shift plus assert
2499   // the mask is not needed. aarch64 just uses bitfield extract
2500   __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift,
2501            ConstantPoolCacheEntry::tos_state_bits);
2502 
2503   assert(btos == 0, "change code, btos != 0");
2504   __ cbnz(flags, notByte);
2505 
2506   // Don't rewrite getstatic, only getfield
2507   if (is_static) rc = may_not_rewrite;
2508 
2509   // btos
2510   __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
2511   __ push(btos);
2512   // Rewrite bytecode to be faster
2513   if (rc == may_rewrite) {
2514     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2515   }
2516   __ b(Done);
2517 
2518   __ bind(notByte);
2519   __ cmp(flags, (u1)ztos);
2520   __ br(Assembler::NE, notBool);
2521 
2522   // ztos (same code as btos)
2523   __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
2524   __ push(ztos);
2525   // Rewrite bytecode to be faster
2526   if (rc == may_rewrite) {
2527     // use btos rewriting, no truncating to t/f bit is needed for getfield.
2528     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2529   }
2530   __ b(Done);
2531 
2532   __ bind(notBool);
2533   __ cmp(flags, (u1)atos);
2534   __ br(Assembler::NE, notObj);
2535   // atos
2536   do_oop_load(_masm, field, r0, IN_HEAP);
2537   __ push(atos);
2538   if (rc == may_rewrite) {
2539     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2540   }
2541   __ b(Done);
2542 
2543   __ bind(notObj);
2544   __ cmp(flags, (u1)itos);
2545   __ br(Assembler::NE, notInt);
2546   // itos
2547   __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
2548   __ push(itos);
2549   // Rewrite bytecode to be faster
2550   if (rc == may_rewrite) {
2551     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2552   }
2553   __ b(Done);
2554 
2555   __ bind(notInt);
2556   __ cmp(flags, (u1)ctos);
2557   __ br(Assembler::NE, notChar);
2558   // ctos
2559   __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
2560   __ push(ctos);
2561   // Rewrite bytecode to be faster


2691     // c_rarg1: object pointer set up above (NULL if static)
2692     // c_rarg2: cache entry pointer
2693     // c_rarg3: jvalue object on the stack
2694     __ call_VM(noreg,
2695                CAST_FROM_FN_PTR(address,
2696                                 InterpreterRuntime::post_field_modification),
2697                c_rarg1, c_rarg2, c_rarg3);
2698     __ get_cache_and_index_at_bcp(cache, index, 1);
2699     __ bind(L1);
2700   }
2701 }
2702 
2703 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2704   transition(vtos, vtos);
2705 
2706   const Register cache = r2;
2707   const Register index = r3;
2708   const Register obj   = r2;
2709   const Register off   = r19;
2710   const Register flags = r0;

2711   const Register bc    = r4;
2712 
2713   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2714   jvmti_post_field_mod(cache, index, is_static);
2715   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2716 
2717   Label Done;
2718   __ mov(r5, flags);
2719 
2720   {
2721     Label notVolatile;
2722     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2723     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2724     __ bind(notVolatile);
2725   }
2726 
2727   // field address
2728   const Address field(obj, off);
2729 
2730   Label notByte, notBool, notInt, notShort, notChar,
2731         notLong, notFloat, notObj, notDouble;
2732 


2733   // x86 uses a shift and mask or wings it with a shift plus assert
2734   // the mask is not needed. aarch64 just uses bitfield extract
2735   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2736 
2737   assert(btos == 0, "change code, btos != 0");
2738   __ cbnz(flags, notByte);
2739 
2740   // Don't rewrite putstatic, only putfield
2741   if (is_static) rc = may_not_rewrite;
2742 
2743   // btos
2744   {
2745     __ pop(btos);
2746     if (!is_static) pop_and_check_object(obj);
2747     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
2748     if (rc == may_rewrite) {
2749       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2750     }
2751     __ b(Done);
2752   }


2755   __ cmp(flags, (u1)ztos);
2756   __ br(Assembler::NE, notBool);
2757 
2758   // ztos
2759   {
2760     __ pop(ztos);
2761     if (!is_static) pop_and_check_object(obj);
2762     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
2763     if (rc == may_rewrite) {
2764       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2765     }
2766     __ b(Done);
2767   }
2768 
2769   __ bind(notBool);
2770   __ cmp(flags, (u1)atos);
2771   __ br(Assembler::NE, notObj);
2772 
2773   // atos
2774   {
2775     __ pop(atos);
2776     if (!is_static) pop_and_check_object(obj);
2777     // Store into the field
2778     do_oop_store(_masm, field, r0, IN_HEAP);
2779     if (rc == may_rewrite) {
2780       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2781     }
2782     __ b(Done);
2783   }
2784 
2785   __ bind(notObj);
2786   __ cmp(flags, (u1)itos);
2787   __ br(Assembler::NE, notInt);
2788 
2789   // itos
2790   {
2791     __ pop(itos);
2792     if (!is_static) pop_and_check_object(obj);
2793     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
2794     if (rc == may_rewrite) {
2795       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2796     }
2797     __ b(Done);
2798   }
2799 
2800   __ bind(notInt);
2801   __ cmp(flags, (u1)ctos);
2802   __ br(Assembler::NE, notChar);


2902 void TemplateTable::putstatic(int byte_no) {
2903   putfield_or_static(byte_no, true);
2904 }
2905 
2906 void TemplateTable::jvmti_post_fast_field_mod()
2907 {
2908   if (JvmtiExport::can_post_field_modification()) {
2909     // Check to see if a field modification watch has been set before
2910     // we take the time to call into the VM.
2911     Label L2;
2912     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2913     __ ldrw(c_rarg3, Address(rscratch1));
2914     __ cbzw(c_rarg3, L2);
2915     __ pop_ptr(r19);                  // copy the object pointer from tos
2916     __ verify_oop(r19);
2917     __ push_ptr(r19);                 // put the object pointer back on tos
2918     // Save tos values before call_VM() clobbers them. Since we have
2919     // to do it for every data type, we use the saved values as the
2920     // jvalue object.
2921     switch (bytecode()) {          // load values into the jvalue object

2922     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
2923     case Bytecodes::_fast_bputfield: // fall through
2924     case Bytecodes::_fast_zputfield: // fall through
2925     case Bytecodes::_fast_sputfield: // fall through
2926     case Bytecodes::_fast_cputfield: // fall through
2927     case Bytecodes::_fast_iputfield: __ push_i(r0); break;
2928     case Bytecodes::_fast_dputfield: __ push_d(); break;
2929     case Bytecodes::_fast_fputfield: __ push_f(); break;
2930     case Bytecodes::_fast_lputfield: __ push_l(r0); break;
2931 
2932     default:
2933       ShouldNotReachHere();
2934     }
2935     __ mov(c_rarg3, esp);             // points to jvalue on the stack
2936     // access constant pool cache entry
2937     __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
2938     __ verify_oop(r19);
2939     // r19: object pointer copied above
2940     // c_rarg2: cache entry pointer
2941     // c_rarg3: jvalue object on the stack
2942     __ call_VM(noreg,
2943                CAST_FROM_FN_PTR(address,
2944                                 InterpreterRuntime::post_field_modification),
2945                r19, c_rarg2, c_rarg3);
2946 
2947     switch (bytecode()) {             // restore tos values

2948     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
2949     case Bytecodes::_fast_bputfield: // fall through
2950     case Bytecodes::_fast_zputfield: // fall through
2951     case Bytecodes::_fast_sputfield: // fall through
2952     case Bytecodes::_fast_cputfield: // fall through
2953     case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
2954     case Bytecodes::_fast_dputfield: __ pop_d(); break;
2955     case Bytecodes::_fast_fputfield: __ pop_f(); break;
2956     case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
2957     default: break;
2958     }
2959     __ bind(L2);
2960   }
2961 }
2962 
2963 void TemplateTable::fast_storefield(TosState state)
2964 {
2965   transition(state, vtos);
2966 
2967   ByteSize base = ConstantPoolCache::base_offset();


2978   // replace index with field offset from cache entry
2979   __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2980 
2981   {
2982     Label notVolatile;
2983     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2984     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2985     __ bind(notVolatile);
2986   }
2987 
2988   Label notVolatile;
2989 
2990   // Get object from stack
2991   pop_and_check_object(r2);
2992 
2993   // field address
2994   const Address field(r2, r1);
2995 
2996   // access field
2997   switch (bytecode()) {
2998   case Bytecodes::_fast_aputfield:
2999     do_oop_store(_masm, field, r0, IN_HEAP);
3000     break;
3001   case Bytecodes::_fast_lputfield:
3002     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
3003     break;
3004   case Bytecodes::_fast_iputfield:
3005     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
3006     break;
3007   case Bytecodes::_fast_zputfield:
3008     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
3009     break;
3010   case Bytecodes::_fast_bputfield:
3011     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
3012     break;
3013   case Bytecodes::_fast_sputfield:
3014     __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
3015     break;
3016   case Bytecodes::_fast_cputfield:
3017     __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);


3071   // r0: object
3072   __ verify_oop(r0);
3073   __ null_check(r0);
3074   const Address field(r0, r1);
3075 
3076   // 8179954: We need to make sure that the code generated for
3077   // volatile accesses forms a sequentially-consistent set of
3078   // operations when combined with STLR and LDAR.  Without a leading
3079   // membar it's possible for a simple Dekker test to fail if loads
3080   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3081   // the stores in one method and we interpret the loads in another.
3082   if (! UseBarriersForVolatile) {
3083     Label notVolatile;
3084     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3085     __ membar(MacroAssembler::AnyAny);
3086     __ bind(notVolatile);
3087   }
3088 
3089   // access field
3090   switch (bytecode()) {
3091   case Bytecodes::_fast_agetfield:
3092     do_oop_load(_masm, field, r0, IN_HEAP);
3093     __ verify_oop(r0);
3094     break;
3095   case Bytecodes::_fast_lgetfield:
3096     __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
3097     break;
3098   case Bytecodes::_fast_igetfield:
3099     __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
3100     break;
3101   case Bytecodes::_fast_bgetfield:
3102     __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
3103     break;
3104   case Bytecodes::_fast_sgetfield:
3105     __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
3106     break;
3107   case Bytecodes::_fast_cgetfield:
3108     __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
3109     break;
3110   case Bytecodes::_fast_fgetfield:


3627            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), r0);
3628       __ pop(atos); // restore the return value
3629 
3630     }
3631     __ b(done);
3632   }
3633 
3634   // slow case
3635   __ bind(slow_case);
3636   __ get_constant_pool(c_rarg1);
3637   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3638   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3639   __ verify_oop(r0);
3640 
3641   // continue
3642   __ bind(done);
3643   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3644   __ membar(Assembler::StoreStore);
3645 }
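
The StoreStore barrier closes the classic unsafe-publication window; a sketch of the hazard it prevents (illustrative only):

    // allocating thread:                          observing thread:
    //   obj->header = ...; obj->field = 0;          tmp = shared_ref;
    //   shared_ref = obj;      // publication       use(tmp->field);   // must not see pre-init values
    // membar(StoreStore) orders the initializing stores before any later
    // store that publishes the new object reference.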
3646 
3647 void TemplateTable::newarray() {
3648   transition(itos, atos);
3649   __ load_unsigned_byte(c_rarg1, at_bcp(1));
3650   __ mov(c_rarg2, r0);
3651   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3652           c_rarg1, c_rarg2);
3653   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3654   __ membar(Assembler::StoreStore);
3655 }
3656 
3657 void TemplateTable::anewarray() {
3658   transition(itos, atos);
3659   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3660   __ get_constant_pool(c_rarg1);
3661   __ mov(c_rarg3, r0);
3662   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3663           c_rarg1, c_rarg2, c_rarg3);
3664   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3665   __ membar(Assembler::StoreStore);
3666 }


3698   __ bind(quicked);
3699   __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
3700   __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
3701 
3702   __ bind(resolved);
3703   __ load_klass(r19, r3);
3704 
3705   // Generate subtype check.  Blows r2, r5.  Object in r3.
3706   // Superklass in r0.  Subklass in r19.
3707   __ gen_subtype_check(r19, ok_is_subtype);
3708 
3709   // Come here on failure
3710   __ push(r3);
3711   // object is at TOS
3712   __ b(Interpreter::_throw_ClassCastException_entry);
3713 
3714   // Come here on success
3715   __ bind(ok_is_subtype);
3716   __ mov(r0, r3); // Restore object in r3
3717 



3718   // Collect counts on whether this test sees NULLs a lot or not.
3719   if (ProfileInterpreter) {
3720     __ b(done);
3721     __ bind(is_null);
3722     __ profile_null_seen(r2);
3723   } else {
3724     __ bind(is_null);   // same as 'done'
3725   }
3726   __ bind(done);
3727 }
3728 
3729 void TemplateTable::instanceof() {
3730   transition(atos, itos);
3731   Label done, is_null, ok_is_subtype, quicked, resolved;
3732   __ cbz(r0, is_null);
3733 
3734   // Get cpool & tags index
3735   __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3736   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3737   // See if bytecode has already been quicked
3738   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3739   __ lea(r1, Address(rscratch1, r19));
3740   __ ldarb(r1, r1);
3741   __ cmp(r1, (u1)JVM_CONSTANT_Class);
3742   __ br(Assembler::EQ, quicked);
3743 
3744   __ push(atos); // save receiver for result, and for GC
3745   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));




 130   case TemplateTable::equal        : return Assembler::NE;
 131   case TemplateTable::not_equal    : return Assembler::EQ;
 132   case TemplateTable::less         : return Assembler::GE;
 133   case TemplateTable::less_equal   : return Assembler::GT;
 134   case TemplateTable::greater      : return Assembler::LE;
 135   case TemplateTable::greater_equal: return Assembler::LT;
 136   }
 137   ShouldNotReachHere();
 138   return Assembler::EQ;
 139 }
 140 
 141 
 142 // Miscellaneous helper routines
 143 // Store an oop (or NULL) at the Address described by obj.
 144 // If val == noreg this means store a NULL
 145 static void do_oop_store(InterpreterMacroAssembler* _masm,
 146                          Address dst,
 147                          Register val,
 148                          DecoratorSet decorators) {
 149   assert(val == noreg || val == r0, "parameter is just for looks");
 150   __ store_heap_oop(dst, val, r10, r1, noreg, decorators); 
 151 }
 152 
 153 static void do_oop_load(InterpreterMacroAssembler* _masm,
 154                         Address src,
 155                         Register dst,
 156                         DecoratorSet decorators) {
 157   __ load_heap_oop(dst, src, r10, r1, decorators);
 158 }
 159 
 160 Address TemplateTable::at_bcp(int offset) {
 161   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 162   return Address(rbcp, offset);
 163 }
 164 
 165 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 166                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 167                                    int byte_no)
 168 {
 169   if (!RewriteBytecodes)  return;
 170   Label L_patch_done;
 171 
 172   switch (bc) {
 173   case Bytecodes::_fast_qputfield:
 174   case Bytecodes::_fast_aputfield:
 175   case Bytecodes::_fast_bputfield:
 176   case Bytecodes::_fast_zputfield:
 177   case Bytecodes::_fast_cputfield:
 178   case Bytecodes::_fast_dputfield:
 179   case Bytecodes::_fast_fputfield:
 180   case Bytecodes::_fast_iputfield:
 181   case Bytecodes::_fast_lputfield:
 182   case Bytecodes::_fast_sputfield:
 183     {
 184       // We skip bytecode quickening for putfield instructions when
 185       // the put_code written to the constant pool cache is zero.
 186       // This is required so that every execution of this instruction
 187       // calls out to InterpreterRuntime::resolve_get_put to do
 188       // additional, required work.
 189       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 190       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 191       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
 192       __ movw(bc_reg, bc);
 193       __ cbzw(temp_reg, L_patch_done);  // don't patch


 729 }
 730 
 731 void TemplateTable::index_check(Register array, Register index)
 732 {
 733   // destroys r1, rscratch1
 734   // check array
 735   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
 736   // sign extend index for use by indexed load
 737   // __ movl2ptr(index, index);
 738   // check index
 739   Register length = rscratch1;
 740   __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
 741   __ cmpw(index, length);
 742   if (index != r1) {
 743     // ??? convention: move aberrant index into r1 for exception message
 744     assert(r1 != array, "different registers");
 745     __ mov(r1, index);
 746   }
 747   Label ok;
 748   __ br(Assembler::LO, ok);
 749   // ??? convention: move array into r3 for exception message
 750    __ mov(r3, array);
 751    __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
 752    __ br(rscratch1);
 753   __ bind(ok);
 754 }
 755 
 756 void TemplateTable::iaload()
 757 {
 758   transition(itos, itos);
 759   __ mov(r1, r0);
 760   __ pop_ptr(r0);
 761   // r0: array
 762   // r1: index
 763   index_check(r0, r1); // leaves index in r1, kills rscratch1
 764   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
 765   __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
 766 }
 767 
 768 void TemplateTable::laload()
 769 {
 770   transition(itos, ltos);
 771   __ mov(r1, r0);
 772   __ pop_ptr(r0);


 792 void TemplateTable::daload()
 793 {
 794   transition(itos, dtos);
 795   __ mov(r1, r0);
 796   __ pop_ptr(r0);
 797   // r0: array
 798   // r1: index
 799   index_check(r0, r1); // leaves index in r1, kills rscratch1
 800   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
 801   __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
 802 }
 803 
 804 void TemplateTable::aaload()
 805 {
 806   transition(itos, atos);
 807   __ mov(r1, r0);
 808   __ pop_ptr(r0);
 809   // r0: array
 810   // r1: index
 811   index_check(r0, r1); // leaves index in r1, kills rscratch1
 812   if (ValueArrayFlatten) {
 813     Label is_flat_array, done;
 814 
 815     __ test_flattened_array_oop(r0, r8 /*temp*/, is_flat_array); 
 816     __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 817     do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
 818 
 819     __ b(done);
 820     __ bind(is_flat_array);
 821     __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), r0, r1);
 822     __ bind(done);
 823   } else {
 824     __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 825     do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
 826   }
 827 }
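
The ValueArrayFlatten branch above gives aaload two paths; in outline (a sketch, not generated code):

    // if (array is flattened)   r0 = InterpreterRuntime::value_array_load(array, index);
    // else                      r0 = load_heap_oop(array[index]);   // normal oop load, IS_ARRAY barrier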
 828 
 829 void TemplateTable::baload()
 830 {
 831   transition(itos, itos);
 832   __ mov(r1, r0);
 833   __ pop_ptr(r0);
 834   // r0: array
 835   // r1: index
 836   index_check(r0, r1); // leaves index in r1, kills rscratch1
 837   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
 838   __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
 839 }
 840 
 841 void TemplateTable::caload()
 842 {
 843   transition(itos, itos);
 844   __ mov(r1, r0);
 845   __ pop_ptr(r0);
 846   // r0: array


1096   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
1097   __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg);
1098 }
1099 
1100 void TemplateTable::dastore() {
1101   transition(dtos, vtos);
1102   __ pop_i(r1);
1103   __ pop_ptr(r3);
1104   // v0: value
1105   // r1:  index
1106   // r3:  array
1107   index_check(r3, r1); // prefer index in r1
1108   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
1109   __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
1110 }
1111 
1112 void TemplateTable::aastore() {
1113   Label is_null, ok_is_subtype, done;
1114   transition(vtos, vtos);
1115   // stack: ..., array, index, value
1116   __ ldr(r0, at_tos());    // value 
1117   __ ldr(r2, at_tos_p1()); // index
1118   __ ldr(r3, at_tos_p2()); // array
1119 
1120   Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
1121 
1122   index_check(r3, r2);     // kills r1
1123 
1124   // FIXME: Could we remove the line below?
1125   __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop); 
1126 
1127   // do array store check - check for NULL value first
1128   __ cbz(r0, is_null);
1129 
1130   Label  is_flat_array;
1131   if (ValueArrayFlatten) {
1132     __ test_flattened_array_oop(r3, r8 /*temp*/, is_flat_array);
1133   }
1134 
1135   // Move subklass into r1
1136   __ load_klass(r1, r0);
1137 
1138   // Move superklass into r0
1139   __ load_klass(r0, r3);
1140   __ ldr(r0, Address(r0, ObjArrayKlass::element_klass_offset()));

1141   // Compress array + index*oopSize + 12 into a single register.  Frees r2.
1142 
1143   // Generate subtype check.  Blows r2, r5
1144   // Superklass in r0.  Subklass in r1.
1145 
1146   __ gen_subtype_check(r1, ok_is_subtype);
1147 
1148   // Come here on failure
1149   // object is at TOS
1150   __ b(Interpreter::_throw_ArrayStoreException_entry);
1151 
1152 
1153   // Come here on success
1154   __ bind(ok_is_subtype);
1155 
1156 
1157   // Get the value we will store
1158   __ ldr(r0, at_tos());
1159   // Now store using the appropriate barrier
1160   do_oop_store(_masm, element_address, r0, IS_ARRAY);
1161   __ b(done);
1162 
1163   // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
1164   __ bind(is_null);
1165   __ profile_null_seen(r2);
1166 
1167   if (EnableValhalla) {
1168     Label is_null_into_value_array_npe, store_null;
1169 
1170     // No way to store null in flat array
1171     __ test_null_free_array_oop(r3, r8, is_null_into_value_array_npe); 
1172     __ b(store_null);
1173 
1174     __ bind(is_null_into_value_array_npe);
1175     __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
1176 
1177     __ bind(store_null);
1178   }
1179 
1180   // Store a NULL
1181   do_oop_store(_masm, element_address, noreg, IS_ARRAY); 
1182   __ b(done);
1183 
1184   if (EnableValhalla) { 
1185      Label is_type_ok;
1186 
1187     // store non-null value
1188     __ bind(is_flat_array);
1189 
1190     // Simplistic type check...
1191     // r0 - value, r2 - index, r3 - array.
1192 
1193     // Profile the not-null value's klass.
1194     // Load value class 
1195      __ load_klass(r1, r0);
1196      __ profile_typecheck(r2, r1, r0); // blows r2, and r0
1197 
1198     // flat value array needs exact type match
1199     // is "r8 == r0" (value subclass == array element superclass)
1200 
1201     // Move element klass into r0
1202 
1203      __ load_klass(r0, r3);
1204 
1205      __ ldr(r0, Address(r0, ArrayKlass::element_klass_offset())); 
1206      __ cmp(r0, r1);
1207      __ br(Assembler::EQ, is_type_ok);
1208 
1209      __ profile_typecheck_failed(r2);
1210      __ b(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
1211 
1212      __ bind(is_type_ok);
1213 
1214     // Reload from TOS to be safe, because of profile_typecheck that blows r2 and r0.
1215     // FIXME: Should we really do it?
1216      __ ldr(r1, at_tos());  // value
1217      __ mov(r2, r3); // array, ldr(r2, at_tos_p2()); 
1218      __ ldr(r3, at_tos_p1()); // index
1219      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_store), r1, r2, r3);
1220   }
1221 
1222 
1223   // Pop stack arguments
1224   __ bind(done);
1225   __ add(esp, esp, 3 * Interpreter::stackElementSize);
1226 }
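
With the Valhalla changes the store now has three outcomes; a compact pseudocode summary of the control flow above (names match the labels in the generated template, the sketch itself is not generated code):

    // if (value == NULL) {
    //   if (array is null-free)   throw NullPointerException;      // is_null_into_value_array_npe
    //   else                      store NULL with oop barrier;     // store_null
    // } else if (array is flattened) {                             // is_flat_array
    //   require value->klass() == array element klass, else ArrayStoreException;
    //   InterpreterRuntime::value_array_store(value, array, index);
    // } else {
    //   gen_subtype_check(value klass, element klass);             // ok_is_subtype / ArrayStoreException
    //   do_oop_store(element_address, value, IS_ARRAY);
    // }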
1227 
1228 void TemplateTable::bastore()
1229 {
1230   transition(itos, vtos);
1231   __ pop_i(r1);
1232   __ pop_ptr(r3);
1233   // r0: value
1234   // r1: index
1235   // r3: array
1236   index_check(r3, r1); // prefer index in r1
1237 
1238   // Need to check whether array is boolean or byte
1239   // since both types share the bastore bytecode.
1240   __ load_klass(r2, r3);
1241   __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));


2078   __ br(j_not(cc), not_taken);
2079   branch(false, false);
2080   __ bind(not_taken);
2081   __ profile_not_taken_branch(r0);
2082 }
2083 
2084 void TemplateTable::if_nullcmp(Condition cc)
2085 {
2086   transition(atos, vtos);
2087   // assume branch is more often taken than not (loops use backward branches)
2088   Label not_taken;
2089   if (cc == equal)
2090     __ cbnz(r0, not_taken);
2091   else
2092     __ cbz(r0, not_taken);
2093   branch(false, false);
2094   __ bind(not_taken);
2095   __ profile_not_taken_branch(r0);
2096 }
2097 
2098 void TemplateTable::if_acmp(Condition cc) {

2099   transition(atos, vtos);
2100   // assume branch is more often taken than not (loops use backward branches)
2101   Label taken, not_taken;
2102   __ pop_ptr(r1);
2103 
2104   Register is_value_mask = rscratch1;
2105   __ mov(is_value_mask, markOopDesc::always_locked_pattern);
2106 
2107   if (EnableValhalla) {
2108     __ cmp(r1, r0);
2109     __ br(Assembler::EQ, (cc == equal) ? taken : not_taken);
2110 
2111     // might be substitutable, test if either r0 or r1 is null
2112     __ andr(r2, r0, r1);
2113     __ cbz(r2, (cc == equal) ? not_taken : taken);
2114 
2115     // and both are values ?
2116     __ ldr(r2, Address(r1, oopDesc::mark_offset_in_bytes()));
2117     __ andr(r2, r2, is_value_mask);
2118     __ ldr(r4, Address(r0, oopDesc::mark_offset_in_bytes()));
2119     __ andr(r4, r4, is_value_mask);
2120     __ andr(r2, r2, r4);
2121     __ cmp(r2,  is_value_mask);
2122     __ br(Assembler::NE, (cc == equal) ? not_taken : taken);
2123 
2124     // same value klass ?
2125     __ load_metadata(r2, r1);
2126     __ load_metadata(r4, r0);
2127     __ cmp(r2, r4);
2128     __ br(Assembler::NE, (cc == equal) ? not_taken : taken);
2129 
2130     // Know both are the same type, let's test for substitutability...
2131     if (cc == equal) {
2132       invoke_is_substitutable(r0, r1, taken, not_taken);
2133     } else {
2134       invoke_is_substitutable(r0, r1, not_taken, taken);
2135     }
2136     __ stop("Not reachable");
2137   }
2138 
2139   __ cmpoop(r1, r0);
2140   __ br(j_not(cc), not_taken);
2141   __ bind(taken);
2142   branch(false, false);
2143   __ bind(not_taken);
2144   __ profile_not_taken_branch(r0);
2145 }
2146 
2147 void TemplateTable::invoke_is_substitutable(Register aobj, Register bobj,
2148                                             Label& is_subst, Label& not_subst) {
2149 
2150   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::is_substitutable), aobj, bobj);
2151   // Restored... r0 answer, jmp to outcome...
2152   __ cbz(r0, not_subst);
2153   __ b(is_subst);
2154 }
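
The EnableValhalla path in if_acmp above implements acmp substitutability roughly as the following sketch (pseudocode mirroring the emitted checks):

    // if (a == b)                              -> equal        // same reference
    // else if (a == NULL || b == NULL)         -> not equal    // andr/cbz on r2
    // else if (!is_value(a) || !is_value(b))   -> not equal    // always_locked_pattern mark-word test
    // else if (klass(a) != klass(b))           -> not equal    // load_metadata compare
    // else  InterpreterRuntime::is_substitutable(a, b)         // decides equal / not equal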
2155 
2156 
2157 void TemplateTable::ret() {
2158   transition(vtos, vtos);
2159   // We might be moving to a safepoint.  The thread which calls
2160   // Interpreter::notice_safepoints() will effectively flush its cache
2161   // when it makes a system call, but we need to do something to
2162   // ensure that we see the changed dispatch table.
2163   __ membar(MacroAssembler::LoadLoad);
2164 
2165   locals_index(r1);
2166   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
2167   __ profile_ret(r1, r2);
2168   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
2169   __ lea(rbcp, Address(rbcp, r1));
2170   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
2171   __ dispatch_next(vtos, 0, /*generate_poll*/true);
2172 }
2173 
2174 void TemplateTable::wide_ret() {
2175   transition(vtos, vtos);
2176   locals_index_wide(r1);


2386     __ tbz(r3, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2387 
2388     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2389 
2390     __ bind(skip_register_finalizer);
2391   }
2392 
2393   // Issue a StoreStore barrier after all stores but before return
2394   // from any constructor for any class with a final field.  We don't
2395   // know if this is a finalizer, so we always do so.
2396   if (_desc->bytecode() == Bytecodes::_return)
2397     __ membar(MacroAssembler::StoreStore);
2398 
2399   // Narrow result if state is itos but result type is smaller.
2400   // Need to narrow in the return bytecode rather than in generate_return_entry
2401   // since compiled code callers expect the result to already be narrowed.
2402   if (state == itos) {
2403     __ narrow(r0);
2404   }
2405 
2406   __ remove_activation(state); 
2407   __ ret(lr);
2408 }
2409 
2410 // ----------------------------------------------------------------------------
2411 // Volatile variables demand their effects be made known to all CPU's
2412 // in order.  Store buffers on most chips allow reads & writes to
2413 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2414 // without some kind of memory barrier (i.e., it's not sufficient that
2415 // the interpreter does not reorder volatile references, the hardware
2416 // also must not reorder them).
2417 //
2418 // According to the new Java Memory Model (JMM):
2419 // (1) All volatiles are serialized wrt to each other.  ALSO reads &
 2420 //     writes act as acquire & release, so:
2421 // (2) A read cannot let unrelated NON-volatile memory refs that
2422 //     happen after the read float up to before the read.  It's OK for
2423 //     non-volatile memory refs that happen before the volatile read to
2424 //     float down below it.
 2425 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2426 //     memory refs that happen BEFORE the write float down to after the


2600   // 8179954: We need to make sure that the code generated for
2601   // volatile accesses forms a sequentially-consistent set of
2602   // operations when combined with STLR and LDAR.  Without a leading
2603   // membar it's possible for a simple Dekker test to fail if loads
2604   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
2605   // the stores in one method and we interpret the loads in another.
2606   if (! UseBarriersForVolatile) {
2607     Label notVolatile;
2608     __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2609     __ membar(MacroAssembler::AnyAny);
2610     __ bind(notVolatile);
2611   }
2612 
2613   const Address field(obj, off);
2614 
2615   Label Done, notByte, notBool, notInt, notShort, notChar,
2616               notLong, notFloat, notObj, notDouble;
2617 
2618   // x86 uses a shift and mask or wings it with a shift plus assert
2619   // the mask is not needed. aarch64 just uses bitfield extract
2620   __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);

2621 
2622   assert(btos == 0, "change code, btos != 0");
2623   __ cbnz(flags, notByte);
2624 
2625   // Don't rewrite getstatic, only getfield
2626   if (is_static) rc = may_not_rewrite;
2627 
2628   // btos
2629   __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
2630   __ push(btos);
2631   // Rewrite bytecode to be faster
2632   if (rc == may_rewrite) {
2633     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2634   }
2635   __ b(Done);
2636 
2637   __ bind(notByte);
2638   __ cmp(flags, (u1)ztos);
2639   __ br(Assembler::NE, notBool);
2640 
2641   // ztos (same code as btos)
2642   __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
2643   __ push(ztos);
2644   // Rewrite bytecode to be faster
2645   if (rc == may_rewrite) {
2646     // use btos rewriting, no truncating to t/f bit is needed for getfield.
2647     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2648   }
2649   __ b(Done);
2650 
2651   __ bind(notBool);
2652   __ cmp(flags, (u1)atos);
2653   __ br(Assembler::NE, notObj);
2654   // atos
2655   if (!EnableValhalla) {
2656     do_oop_load(_masm, field, r0, IN_HEAP);
2657     __ push(atos);
2658     if (rc == may_rewrite) {
2659       patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2660     }  
2661     __ b(Done);
2662   } else { // Valhalla
2663 
2664     if (is_static) {
2665       __ load_heap_oop(r0, field);
2666       Label isFlattenable, isUninitialized;
2667       // Issue below if the static field has not been initialized yet
2668       __ test_field_is_flattenable(raw_flags, r8 /*temp*/, isFlattenable);
2669         // Not flattenable case
2670         __ push(atos);
2671         __ b(Done);
2672       // Flattenable case, must not return null even if uninitialized
2673       __ bind(isFlattenable);
2674         __ cbz(r0, isUninitialized);
2675           __ push(atos);
2676           __ b(Done);
2677         __ bind(isUninitialized);
2678           __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
2679           __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_value_field), obj, raw_flags);
2680           __ verify_oop(r0);
2681           __ push(atos);
2682           __ b(Done);
2683     } else {
2684       Label isFlattened, isInitialized, isFlattenable, rewriteFlattenable;
2685         __ test_field_is_flattenable(raw_flags, r8 /*temp*/, isFlattenable);
2686         // Non-flattenable field case, also covers the object case
2687         __ load_heap_oop(r0, field);
2688         __ push(atos);
2689         if (rc == may_rewrite) {
2690           patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2691         }
2692         __ b(Done);
2693       __ bind(isFlattenable);
2694         __ test_field_is_flattened(raw_flags, r8 /* temp */, isFlattened);
2695          // Non-flattened field case
2696           __ load_heap_oop(r0, field);
2697           __ cbnz(r0, isInitialized);
2698             __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
2699             __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field), obj, raw_flags);
2700           __ bind(isInitialized);
2701           __ verify_oop(r0);
2702           __ push(atos);
2703           __ b(rewriteFlattenable);
2704         __ bind(isFlattened);
2705           __ ldr(r10, Address(cache, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset())));
2706           __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
2707           call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field), obj, raw_flags, r10); 
2708           __ verify_oop(r0);
2709           __ push(atos);
2710       __ bind(rewriteFlattenable);
2711       if (rc == may_rewrite) { 
2712          patch_bytecode(Bytecodes::_fast_qgetfield, bc, r1);
2713       }
2714       __ b(Done);
2715     }
2716   }
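
For the atos case the Valhalla branch above distinguishes the static and instance paths and the flattenable/flattened variants; a condensed summary (sketch only):

    // if (getstatic) {
    //   oop v = load_heap_oop(field);
    //   if (field is flattenable && v == NULL)
    //     v = InterpreterRuntime::uninitialized_static_value_field(obj, field_index);
    //   push(v);
    // } else {                                             // getfield
    //   if (!flattenable)       push(load_heap_oop(field));                 // may rewrite to _fast_agetfield
    //   else if (!flattened) {  v = load_heap_oop(field);
    //                           if (v == NULL) v = uninitialized_instance_value_field(obj, field_index);
    //                           push(v); }
    //   else                    push(InterpreterRuntime::read_flattened_field(obj, field_index, f1));
    //   // both flattenable cases rewrite to _fast_qgetfield
    // }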

2717 
2718   __ bind(notObj);
2719   __ cmp(flags, (u1)itos);
2720   __ br(Assembler::NE, notInt);
2721   // itos
2722   __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
2723   __ push(itos);
2724   // Rewrite bytecode to be faster
2725   if (rc == may_rewrite) {
2726     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2727   }
2728   __ b(Done);
2729 
2730   __ bind(notInt);
2731   __ cmp(flags, (u1)ctos);
2732   __ br(Assembler::NE, notChar);
2733   // ctos
2734   __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
2735   __ push(ctos);
2736   // Rewrite bytecode to be faster


2866     // c_rarg1: object pointer set up above (NULL if static)
2867     // c_rarg2: cache entry pointer
2868     // c_rarg3: jvalue object on the stack
2869     __ call_VM(noreg,
2870                CAST_FROM_FN_PTR(address,
2871                                 InterpreterRuntime::post_field_modification),
2872                c_rarg1, c_rarg2, c_rarg3);
2873     __ get_cache_and_index_at_bcp(cache, index, 1);
2874     __ bind(L1);
2875   }
2876 }
2877 
2878 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2879   transition(vtos, vtos);
2880 
2881   const Register cache = r2;
2882   const Register index = r3;
2883   const Register obj   = r2;
2884   const Register off   = r19;
2885   const Register flags = r0;
2886   const Register flags2 = r6;
2887   const Register bc    = r4;
2888 
2889   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2890   jvmti_post_field_mod(cache, index, is_static);
2891   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2892 
2893   Label Done;
2894   __ mov(r5, flags);
2895 
2896   {
2897     Label notVolatile;
2898     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2899     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
2900     __ bind(notVolatile);
2901   }
2902 
2903   // field address
2904   const Address field(obj, off);
2905 
2906   Label notByte, notBool, notInt, notShort, notChar,
2907         notLong, notFloat, notObj, notDouble;
2908 
2909   __ mov(flags2, flags); 
2910 
2911   // x86 uses a shift and mask or wings it with a shift plus assert
2912   // the mask is not needed. aarch64 just uses bitfield extract
2913   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2914 
2915   assert(btos == 0, "change code, btos != 0");
2916   __ cbnz(flags, notByte);
2917 
2918   // Don't rewrite putstatic, only putfield
2919   if (is_static) rc = may_not_rewrite;
2920 
2921   // btos
2922   {
2923     __ pop(btos);
2924     if (!is_static) pop_and_check_object(obj);
2925     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
2926     if (rc == may_rewrite) {
2927       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2928     }
2929     __ b(Done);
2930   }


2933   __ cmp(flags, (u1)ztos);
2934   __ br(Assembler::NE, notBool);
2935 
2936   // ztos
2937   {
2938     __ pop(ztos);
2939     if (!is_static) pop_and_check_object(obj);
2940     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
2941     if (rc == may_rewrite) {
2942       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
2943     }
2944     __ b(Done);
2945   }
2946 
2947   __ bind(notBool);
2948   __ cmp(flags, (u1)atos);
2949   __ br(Assembler::NE, notObj);
2950 
2951   // atos
2952   {
2953      if (!EnableValhalla) {
2954       __ pop(atos);
2955       if (!is_static) pop_and_check_object(obj);
2956       // Store into the field
2957       do_oop_store(_masm, field, r0, IN_HEAP);
2958       if (rc == may_rewrite) {
2959         patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2960       }
2961       __ b(Done);
2962      } else { // Valhalla
2963 
2964       __ pop(atos);
2965       if (is_static) {
2966         Label notFlattenable;
2967          __ test_field_is_not_flattenable(flags2, r8 /* temp */, notFlattenable);
2968          __ null_check(r0);
2969          __ bind(notFlattenable);
2970          do_oop_store(_masm, field, r0, IN_HEAP); 
2971          __ b(Done);
2972       } else {
2973         Label isFlattenable, isFlattened, notBuffered, notBuffered2, rewriteNotFlattenable, rewriteFlattenable;
2974         __ test_field_is_flattenable(flags2, r8 /*temp*/, isFlattenable);
2975         // Not flattenable case, covers not flattenable values and objects
2976         pop_and_check_object(obj);
2977         // Store into the field
2978         do_oop_store(_masm, field, r0, IN_HEAP);
2979         __ bind(rewriteNotFlattenable);
2980         if (rc == may_rewrite) {
2981           patch_bytecode(Bytecodes::_fast_aputfield, bc, r19, true, byte_no); 
2982         }
2983         __ b(Done);
2984         // Implementation of the flattenable semantic
2985         __ bind(isFlattenable);
2986         __ null_check(r0);
2987         __ test_field_is_flattened(flags2, r8 /*temp*/, isFlattened);
2988         // Not flattened case
2989         pop_and_check_object(obj);
2990         // Store into the field
2991         do_oop_store(_masm, field, r0, IN_HEAP);
2992         __ b(rewriteFlattenable);
2993         __ bind(isFlattened);
2994         pop_and_check_object(obj);
2995         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value), r0, off, obj);
2996         __ bind(rewriteFlattenable);
2997         if (rc == may_rewrite) {
2998           patch_bytecode(Bytecodes::_fast_qputfield, bc, r19, true, byte_no);
2999         }
3000         __ b(Done);
3001       }
3002      }  // Valhalla
3003   }
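
Similarly, the atos putfield/putstatic path above handles flattenable fields; a condensed sketch of the flow:

    // pop value into r0;
    // if (putstatic) {
    //   if (field is flattenable)  null_check(value);       // NULL is never legal in a flattenable field
    //   store with oop barrier;
    // } else if (!flattenable) {   store with oop barrier;  // rewrite to _fast_aputfield
    // } else {                     null_check(value);
    //   if (!flattened)            store with oop barrier;  // rewrite to _fast_qputfield
    //   else                       InterpreterRuntime::write_flattened_value(value, off, obj);
    // }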
3004 
3005   __ bind(notObj);
3006   __ cmp(flags, (u1)itos);
3007   __ br(Assembler::NE, notInt);
3008 
3009   // itos
3010   {
3011     __ pop(itos);
3012     if (!is_static) pop_and_check_object(obj);
3013     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
3014     if (rc == may_rewrite) {
3015       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
3016     }
3017     __ b(Done);
3018   }
3019 
3020   __ bind(notInt);
3021   __ cmp(flags, (u1)ctos);
3022   __ br(Assembler::NE, notChar);


3122 void TemplateTable::putstatic(int byte_no) {
3123   putfield_or_static(byte_no, true);
3124 }
3125 
3126 void TemplateTable::jvmti_post_fast_field_mod()
3127 {
3128   if (JvmtiExport::can_post_field_modification()) {
3129     // Check to see if a field modification watch has been set before
3130     // we take the time to call into the VM.
3131     Label L2;
3132     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3133     __ ldrw(c_rarg3, Address(rscratch1));
3134     __ cbzw(c_rarg3, L2);
3135     __ pop_ptr(r19);                  // copy the object pointer from tos
3136     __ verify_oop(r19);
3137     __ push_ptr(r19);                 // put the object pointer back on tos
3138     // Save tos values before call_VM() clobbers them. Since we have
3139     // to do it for every data type, we use the saved values as the
3140     // jvalue object.
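    // The slot just pushed back onto the expression stack doubles as the
    // jvalue: c_rarg3 is set to esp below, so the VM reads the value
    // straight from the top-of-stack slot(s).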
3141     switch (bytecode()) {          // load values into the jvalue object
3142     case Bytecodes::_fast_qputfield: // fall through
3143     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
3144     case Bytecodes::_fast_bputfield: // fall through
3145     case Bytecodes::_fast_zputfield: // fall through
3146     case Bytecodes::_fast_sputfield: // fall through
3147     case Bytecodes::_fast_cputfield: // fall through
3148     case Bytecodes::_fast_iputfield: __ push_i(r0); break;
3149     case Bytecodes::_fast_dputfield: __ push_d(); break;
3150     case Bytecodes::_fast_fputfield: __ push_f(); break;
3151     case Bytecodes::_fast_lputfield: __ push_l(r0); break;
3152 
3153     default:
3154       ShouldNotReachHere();
3155     }
3156     __ mov(c_rarg3, esp);             // points to jvalue on the stack
3157     // access constant pool cache entry
3158     __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
3159     __ verify_oop(r19);
3160     // r19: object pointer copied above
3161     // c_rarg2: cache entry pointer
3162     // c_rarg3: jvalue object on the stack
3163     __ call_VM(noreg,
3164                CAST_FROM_FN_PTR(address,
3165                                 InterpreterRuntime::post_field_modification),
3166                r19, c_rarg2, c_rarg3);
3167 
3168     switch (bytecode()) {             // restore tos values
3169     case Bytecodes::_fast_qputfield: // fall through
3170     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
3171     case Bytecodes::_fast_bputfield: // fall through
3172     case Bytecodes::_fast_zputfield: // fall through
3173     case Bytecodes::_fast_sputfield: // fall through
3174     case Bytecodes::_fast_cputfield: // fall through
3175     case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
3176     case Bytecodes::_fast_dputfield: __ pop_d(); break;
3177     case Bytecodes::_fast_fputfield: __ pop_f(); break;
3178     case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
3179     default: break;
3180     }
3181     __ bind(L2);
3182   }
3183 }
3184 
3185 void TemplateTable::fast_storefield(TosState state)
3186 {
3187   transition(state, vtos);
3188 
3189   ByteSize base = ConstantPoolCache::base_offset();


3200   // replace index with field offset from cache entry
3201   __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3202 
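  // For a volatile store, emit a leading StoreStore|LoadStore barrier so
  // earlier accesses cannot be reordered past the field store (release
  // semantics); the notVolatile label declared below is presumably bound
  // by the matching trailing barrier after the store, outside this hunk.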
3203   {
3204     Label notVolatile;
3205     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3206     __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
3207     __ bind(notVolatile);
3208   }
3209 
3210   Label notVolatile;
3211 
3212   // Get object from stack
3213   pop_and_check_object(r2);
3214 
3215   // field address
3216   const Address field(r2, r1);
3217 
3218   // access field
3219   switch (bytecode()) {
3220   case Bytecodes::_fast_qputfield:
3221     {
3222       Label isFlattened, done;
3223       __ null_check(r0);
3224       __ test_field_is_flattened(r3, r8 /* temp */, isFlattened);
3225       // Not flattened case
3226       do_oop_store(_masm, field, r0, IN_HEAP);
3227       __ b(done);
3228       __ bind(isFlattened);
3229       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value), r0, r1, r2);
3230       __ bind(done);
3231     }
3232     break;
3233   case Bytecodes::_fast_aputfield:
3234     do_oop_store(_masm, field, r0, IN_HEAP);
3235     break;
3236   case Bytecodes::_fast_lputfield:
3237     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
3238     break;
3239   case Bytecodes::_fast_iputfield:
3240     __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
3241     break;
3242   case Bytecodes::_fast_zputfield:
3243     __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
3244     break;
3245   case Bytecodes::_fast_bputfield:
3246     __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
3247     break;
3248   case Bytecodes::_fast_sputfield:
3249     __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
3250     break;
3251   case Bytecodes::_fast_cputfield:
3252     __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);


3306   // r0: object
3307   __ verify_oop(r0);
3308   __ null_check(r0);
3309   const Address field(r0, r1);
3310 
3311   // 8179954: We need to make sure that the code generated for
3312   // volatile accesses forms a sequentially-consistent set of
3313   // operations when combined with STLR and LDAR.  Without a leading
3314   // membar it's possible for a simple Dekker test to fail if loads
3315   // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
3316   // the stores in one method and we interpret the loads in another.
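  // Roughly: with thread 1 doing  x = 1; r1 = y;  and thread 2 doing
  // y = 1; r2 = x;  on volatile fields, STLR stores paired with plain LDR
  // loads would allow r1 == r2 == 0, which sequential consistency forbids.
  // The AnyAny barrier below keeps the volatile load from being reordered
  // ahead of a preceding STLR.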
3317   if (! UseBarriersForVolatile) {
3318     Label notVolatile;
3319     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3320     __ membar(MacroAssembler::AnyAny);
3321     __ bind(notVolatile);
3322   }
3323 
3324   // access field
3325   switch (bytecode()) {
3326   case Bytecodes::_fast_qgetfield:
3327     {
3328       Label isFlattened, isInitialized, Done;
3329       // FIXME: We don't need to reload registers multiple times, but stay close to x86 code
3330       __ ldrw(r9, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
3331       __ test_field_is_flattened(r9, r8 /* temp */, isFlattened);
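      // Two paths follow: if the field is not flattened, load the buffered
      // value oop and, if it is still null, apparently ask the runtime for
      // the default value of the field's type; if the field is flattened,
      // call the runtime to read the payload and return a buffered value in r0.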
3332         // Non-flattened field case
3333         __ mov(r9, r0);
3334         __ load_heap_oop(r0, field);
3335         __ cbnz(r0, isInitialized);
3336           __ mov(r0, r9);
3337           __ ldrw(r9, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
3338           __ andw(r9, r9, ConstantPoolCacheEntry::field_index_mask);
3339           __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field), r0, r9);
3340         __ bind(isInitialized);
3341         __ verify_oop(r0);
3342         __ b(Done);
3343       __ bind(isFlattened);
3344         __ ldrw(r9, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
3345         __ andw(r9, r9, ConstantPoolCacheEntry::field_index_mask);
3346         __ ldr(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset())));
3347         call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field), r0, r9, r3);
3348         __ verify_oop(r0);
3349       __ bind(Done);
3350     }
3351     break;
3352   case Bytecodes::_fast_agetfield:
3353     do_oop_load(_masm, field, r0, IN_HEAP);
3354     __ verify_oop(r0);
3355     break;
3356   case Bytecodes::_fast_lgetfield:
3357     __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
3358     break;
3359   case Bytecodes::_fast_igetfield:
3360     __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
3361     break;
3362   case Bytecodes::_fast_bgetfield:
3363     __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
3364     break;
3365   case Bytecodes::_fast_sgetfield:
3366     __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
3367     break;
3368   case Bytecodes::_fast_cgetfield:
3369     __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
3370     break;
3371   case Bytecodes::_fast_fgetfield:


3888            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), r0);
3889       __ pop(atos); // restore the return value
3890 
3891     }
3892     __ b(done);
3893   }
3894 
3895   // slow case
3896   __ bind(slow_case);
3897   __ get_constant_pool(c_rarg1);
3898   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3899   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3900   __ verify_oop(r0);
3901 
3902   // continue
3903   __ bind(done);
3904   // Must prevent reordering of stores for object initialization with stores that publish the new object.
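  // i.e. without the StoreStore barrier a racing thread that loads the new
  // reference from a field or array element could observe the object before
  // its header and field-initialization stores become visible.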
3905   __ membar(Assembler::StoreStore);
3906 }
3907 
3908 void TemplateTable::defaultvalue() {
3909   transition(vtos, atos);
3910   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3911   __ get_constant_pool(c_rarg1);
3912   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::defaultvalue),
3913           c_rarg1, c_rarg2);
3914   __ verify_oop(r0);
3915   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3916   __ membar(Assembler::StoreStore);
3917 }
3918 
3919 void TemplateTable::withfield() {
3920   transition(vtos, atos);
3921   resolve_cache_and_index(f2_byte, c_rarg1 /*cache*/, c_rarg2 /*index*/, sizeof(u2));
3922 
3923   // n.b. unlike x86, the cache is addressed as rcpool plus the indexed offset,
3924   // so rcpool is passed here to meet the shared code's expectations
3925 
3926   call_VM(r1, CAST_FROM_FN_PTR(address, InterpreterRuntime::withfield), rcpool);
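  // The new value object is apparently returned in the oop-result register
  // (r1), while r0 carries the runtime call's integer result, the number of
  // bytes to pop from the expression stack; hence the esp adjustment and
  // the final move of the result into r0 (tos).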
3927   __ verify_oop(r1);
3928   __ add(esp, esp, r0);
3929   __ mov(r0, r1);
3930 }
3931 
3932 void TemplateTable::newarray() {
3933   transition(itos, atos);
3934   __ load_unsigned_byte(c_rarg1, at_bcp(1));
3935   __ mov(c_rarg2, r0);
3936   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3937           c_rarg1, c_rarg2);
3938   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3939   __ membar(Assembler::StoreStore);
3940 }
3941 
3942 void TemplateTable::anewarray() {
3943   transition(itos, atos);
3944   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3945   __ get_constant_pool(c_rarg1);
3946   __ mov(c_rarg3, r0);
3947   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3948           c_rarg1, c_rarg2, c_rarg3);
3949   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3950   __ membar(Assembler::StoreStore);
3951 }


3983   __ bind(quicked);
3984   __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
3985   __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
3986 
3987   __ bind(resolved);
3988   __ load_klass(r19, r3);
3989 
3990   // Generate subtype check.  Blows r2, r5.  Object in r3.
3991   // Superklass in r0.  Subklass in r19.
3992   __ gen_subtype_check(r19, ok_is_subtype);
3993 
3994   // Come here on failure
3995   __ push(r3);
3996   // object is at TOS
3997   __ b(Interpreter::_throw_ClassCastException_entry);
3998 
3999   // Come here on success
4000   __ bind(ok_is_subtype);
4001   __ mov(r0, r3); // Restore object saved in r3
4002 
4003   __ b(done);
4004   __ bind(is_null);
4005 
4006   // Collect counts on whether this test sees NULLs a lot or not.
4007   if (ProfileInterpreter) {


4008     __ profile_null_seen(r2);


4009   }
4010 
4011   if (EnableValhalla) {
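    // With Valhalla, a checkcast to an inline ("Q") type must reject null:
    // re-read the constant pool tag for this index and, if the Q-descriptor
    // bit is set, throw NullPointerException instead of falling through.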
4012     // Get cpool & tags index
4013     __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
4014     __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
4015     // See if bytecode has already been quicked
4016     __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
4017     __ lea(r1, Address(rscratch1, r19));
4018     __ ldarb(r1, r1);
4019     // See if CP entry is a Q-descriptor
4020     __ andr (r1, r1, JVM_CONSTANT_QDescBit);
4021     __ cmp(r1, (u1) JVM_CONSTANT_QDescBit);
4022     __ br(Assembler::NE, done);
4023     __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
4024   }
4025 
4026   __ bind(done);
4027 }
4028 
4029 void TemplateTable::instanceof() {
4030   transition(atos, itos);
4031   Label done, is_null, ok_is_subtype, quicked, resolved;
4032   __ cbz(r0, is_null);
4033 
4034   // Get cpool & tags index
4035   __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
4036   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
4037   // See if bytecode has already been quicked
4038   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
4039   __ lea(r1, Address(rscratch1, r19));
4040   __ ldarb(r1, r1);
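  // The tag is loaded with acquire semantics (ldarb), presumably so that a
  // quicked JVM_CONSTANT_Class tag is only observed together with the
  // resolved klass published before it.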
4041   __ cmp(r1, (u1)JVM_CONSTANT_Class);
4042   __ br(Assembler::EQ, quicked);
4043 
4044   __ push(atos); // save receiver for result, and for GC
4045   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));


< prev index next >