src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
*/
#include "asm/macroAssembler.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/vmIntrinsics.hpp"
+ #include "code/aotCodeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gc_globals.hpp"
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::multiplyToLen_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
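+ // If the AOT code cache already holds this stub, reuse the cached copy instead of generating it.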
+ if (AOTCodeCache::load_stub(this, vmIntrinsics::_multiplyToLen, "multiplyToLen", start)) {
+ return start;
+ }
+
// Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
// Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
const Register x = rdi;
const Register xlen = rax;
const Register y = rsi;
restore_arg_regs();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
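+ // Record the freshly generated stub in the AOT code cache so later runs can load it.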
+ AOTCodeCache::store_stub(this, vmIntrinsics::_multiplyToLen, "multiplyToLen", start);
return start;
}
/**
* Arguments:
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::squareToLen_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
+ if (AOTCodeCache::load_stub(this, vmIntrinsics::_squareToLen, "squareToLen", start)) {
+ return start;
+ }
+
// Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
// Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
const Register x = rdi;
const Register len = rsi;
const Register z = r8;
restore_arg_regs();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
+ AOTCodeCache::store_stub(this, vmIntrinsics::_squareToLen, "squareToLen", start);
return start;
}
address StubGenerator::generate_method_entry_barrier() {
__ align(CodeEntryAlignment);
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::mulAdd_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
+ if (AOTCodeCache::load_stub(this, vmIntrinsics::_mulAdd, "mulAdd", start)) {
+ return start;
+ }
+
// Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
// Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
const Register out = rdi;
const Register in = rsi;
const Register offset = r11;
restore_arg_regs();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
+ AOTCodeCache::store_stub(this, vmIntrinsics::_mulAdd, "mulAdd", start);
return start;
}
address StubGenerator::generate_bigIntegerRightShift() {
__ align(CodeEntryAlignment);
// Round to zero, 64-bit mode, exceptions masked, flags specialized
StubRoutines::x86::_mxcsr_rz = EnableX86ECoreOpts ? 0x7FBF : 0x7F80;
}
// Initialization
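+ // Stubs generated before universe initialization (hence "preuniverse"); currently only the fence stub lives here.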
+ void StubGenerator::generate_preuniverse_stubs() {
+ // atomic calls
+ StubRoutines::_fence_entry = generate_orderaccess_fence();
+ }
+
void StubGenerator::generate_initial_stubs() {
// Generates all stubs and initializes the entry points
// These platform-specific settings are needed by generate_call_stub()
create_control_words();
generate_call_stub(StubRoutines::_call_stub_return_address);
// the catch_exception entry is referenced by megamorphic call
StubRoutines::_catch_exception_entry = generate_catch_exception();
- // atomic calls
- StubRoutines::_fence_entry = generate_orderaccess_fence();
-
// platform dependent
StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
#endif // COMPILER2_OR_JVMCI
}
StubGenerator::StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
switch(blob_id) {
+ case preuniverse_id:
+ generate_preuniverse_stubs();
+ break;
case initial_id:
generate_initial_stubs();
break;
case continuation_id:
generate_continuation_stubs();