src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/register.hpp"
#include "atomic_aarch64.hpp"
+ #include "code/aotCodeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/tlab_globals.hpp"
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::multiplyToLen_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
+
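+ // If this stub was stored in the AOT code cache, load it instead of regenerating it.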
+ if (AOTCodeCache::load_stub(this, vmIntrinsics::_multiplyToLen, "multiplyToLen", start)) {
+ return start;
+ }
const Register x = r0;
const Register xlen = r1;
const Register y = r2;
const Register ylen = r3;
const Register z = r4;
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ multiply_to_len(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(lr);
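+ // Store the generated stub in the AOT code cache so later runs can load it.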
+ AOTCodeCache::store_stub(this, vmIntrinsics::_multiplyToLen, "multiplyToLen", start);
return start;
}
address generate_squareToLen() {
// squareToLen algorithm for sizes 1..127 described in java code works
// faster than multiply_to_len on some CPUs and slower on others, but
// multiply_to_len shows a bit better overall results
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::squareToLen_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
+ if (AOTCodeCache::load_stub(this, vmIntrinsics::_squareToLen, "squareToLen", start)) {
+ return start;
+ }
const Register x = r0;
const Register xlen = r1;
const Register z = r2;
const Register y = r4; // == x
const Register ylen = r5; // == xlen
__ mov(ylen, xlen);
__ multiply_to_len(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
__ pop(spilled_regs, sp);
__ leave();
__ ret(lr);
+
+ AOTCodeCache::store_stub(this, vmIntrinsics::_squareToLen, "squareToLen", start);
return start;
}
address generate_mulAdd() {
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::mulAdd_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
+ if (AOTCodeCache::load_stub(this, vmIntrinsics::_mulAdd, "mulAdd", start)) {
+ return start;
+ }
const Register out = r0;
const Register in = r1;
const Register offset = r2;
const Register len = r3;
const Register k = r4;
__ enter();
__ mul_add(out, in, offset, len, k);
__ leave();
__ ret(lr);
+ AOTCodeCache::store_stub(this, vmIntrinsics::_mulAdd, "mulAdd", start);
return start;
}
// Arguments:
//
// t0 = sub(Pm_base, Pn_base, t0, len);
// }
};
// Initialization
+ void generate_preuniverse_stubs() {
+ // preuniverse stubs are not needed for aarch64
+ }
+
void generate_initial_stubs() {
// Generate initial stubs and initialize the entry points that exist
// on all platforms. Note: This is code that could be shared among
// different platforms - however the benefit seems to be smaller than
// the disadvantage of having a much more complicated generator
// structure.
}
public:
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
switch(blob_id) {
+ case preuniverse_id:
+ generate_preuniverse_stubs();
+ break;
case initial_id:
generate_initial_stubs();
break;
case continuation_id:
generate_continuation_stubs();