src/hotspot/cpu/aarch64/templateTable_aarch64.cpp

*** 1476,1486 ****
      __ fdivs(v0, v1, v0);
      break;
    case rem:
      __ fmovs(v1, v0);
      __ pop_f(v0);
!     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      break;
    default:
      ShouldNotReachHere();
      break;
    }
--- 1476,1487 ----
      __ fdivs(v0, v1, v0);
      break;
    case rem:
      __ fmovs(v1, v0);
      __ pop_f(v0);
!     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::frem),
!                           0, 2, MacroAssembler::ret_type_float);
      break;
    default:
      ShouldNotReachHere();
      break;
    }
*** 1508,1518 ****
      __ fdivd(v0, v1, v0);
      break;
    case rem:
      __ fmovd(v1, v0);
      __ pop_d(v0);
!     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      break;
    default:
      ShouldNotReachHere();
      break;
    }
--- 1509,1520 ----
      __ fdivd(v0, v1, v0);
      break;
    case rem:
      __ fmovd(v1, v0);
      __ pop_d(v0);
!     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::drem),
!                           0, 2, MacroAssembler::ret_type_double);
      break;
    default:
      ShouldNotReachHere();
      break;
    }
*** 1649,1670 ****
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzsw(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
!     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
      __ bind(L_Okay);
    }
      break;
    case Bytecodes::_f2l:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzs(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
!     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
      __ bind(L_Okay);
    }
      break;
    case Bytecodes::_f2d:
      __ fcvts(v0, v0);
--- 1651,1674 ----
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzsw(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
!     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::f2i),
!                           0, 1, MacroAssembler::ret_type_integral);
      __ bind(L_Okay);
    }
      break;
    case Bytecodes::_f2l:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzs(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
!     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::f2l),
!                           0, 1, MacroAssembler::ret_type_integral);
      __ bind(L_Okay);
    }
      break;
    case Bytecodes::_f2d:
      __ fcvts(v0, v0);
*** 1674,1695 ****
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzdw(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
!     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
      __ bind(L_Okay);
    }
      break;
    case Bytecodes::_d2l:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzd(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
!     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
      __ bind(L_Okay);
    }
      break;
    case Bytecodes::_d2f:
      __ fcvtd(v0, v0);
--- 1678,1701 ----
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzdw(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
!     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::d2i),
!                           0, 1, MacroAssembler::ret_type_integral);
      __ bind(L_Okay);
    }
      break;
    case Bytecodes::_d2l:
    {
      Label L_Okay;
      __ clear_fpsr();
      __ fcvtzd(r0, v0);
      __ get_fpsr(r1);
      __ cbzw(r1, L_Okay);
!     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::d2l),
!                           0, 1, MacroAssembler::ret_type_integral);
      __ bind(L_Okay);
    }
      break;
    case Bytecodes::_d2f:
      __ fcvtd(v0, v0);
*** 2315,2325 ****
                                              Register index,
                                              size_t index_size) {
    const Register temp = r19;
    assert_different_registers(Rcache, index, temp);
  
!   Label resolved, clinit_barrier_slow;
  
    Bytecodes::Code code = bytecode();
    switch (code) {
    case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
    case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
--- 2321,2331 ----
                                              Register index,
                                              size_t index_size) {
    const Register temp = r19;
    assert_different_registers(Rcache, index, temp);
  
!   Label resolved;
  
    Bytecodes::Code code = bytecode();
    switch (code) {
    case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
    case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
*** 2330,2357 ****
    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
    __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
    __ br(Assembler::EQ, resolved);
  
    // resolve first time through
-   // Class initialization barrier slow path lands here as well.
-   __ bind(clinit_barrier_slow);
    address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
    __ mov(temp, (int) code);
    __ call_VM(noreg, entry, temp);
  
    // Update registers with resolved info
    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
    // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
    // so all clients ofthis method must be modified accordingly
    __ bind(resolved);
- 
-   // Class initialization barrier for static methods
-   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
-     __ load_resolved_method_at_index(byte_no, temp, Rcache);
-     __ load_method_holder(temp, temp);
-     __ clinit_barrier(temp, rscratch1, NULL, &clinit_barrier_slow);
-   }
  }
  
  // The Rcache and index registers must be set before call
  // n.b unlike x86 cache already includes the index offset
  void TemplateTable::load_field_cp_cache_entry(Register obj,
--- 2336,2354 ----
*** 3419,3430 ****
  
    // profile this call
    __ profile_virtual_call(r3, r13, r19);
  
    // Get declaring interface class from method, and itable index
! 
!   __ load_method_holder(r0, rmethod);
    __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
    __ subw(rmethod, rmethod, Method::itable_index_max);
    __ negw(rmethod, rmethod);
  
    // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
--- 3416,3428 ----
  
    // profile this call
    __ profile_virtual_call(r3, r13, r19);
  
    // Get declaring interface class from method, and itable index
!   __ ldr(r0, Address(rmethod, Method::const_offset()));
!   __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
!   __ ldr(r0, Address(r0, ConstantPool::pool_holder_offset_in_bytes()));
    __ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
    __ subw(rmethod, rmethod, Method::itable_index_max);
    __ negw(rmethod, rmethod);
  
    // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
*** 3613,3623 ****
    // initialize object header only.
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      __ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
    } else {
!     __ mov(rscratch1, (intptr_t)markWord::prototype().value());
    }
    __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
    __ store_klass_gap(r0, zr);  // zero klass gap for compressed oops
    __ store_klass(r0, r4);      // store klass last
  
--- 3611,3621 ----
    // initialize object header only.
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      __ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
    } else {
!     __ mov(rscratch1, (intptr_t)markOopDesc::prototype());
    }
    __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
    __ store_klass_gap(r0, zr);  // zero klass gap for compressed oops
    __ store_klass(r0, r4);      // store klass last
  