src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
}
return stack;
}
+ const uint SharedRuntime::java_return_convention_max_int = 1;
+ const uint SharedRuntime::java_return_convention_max_float = 1;
+ int SharedRuntime::java_return_convention(const BasicType *sig_bt,
+                                           VMRegPair *regs,
+                                           int total_args_passed) {
+   Unimplemented();
+   return 0;
+ }
+
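
The return-convention hooks are only stubbed out for the 32-bit port; the two constants above cap it at one integer and one floating-point return register. For orientation, a minimal sketch of what an ia32 implementation might look like, assuming the conventional eax / edx:eax / xmm0 return registers and that a negative result means the value cannot be returned in registers (register choice and return-value semantics are assumptions here, not part of this change):

    // Hedged sketch only -- not in this change. Assumes eax for int/oop,
    // edx:eax for long, xmm0 for float/double (UseSSE >= 2), and that the
    // caller treats a negative result as "does not fit in return registers".
    int SharedRuntime::java_return_convention(const BasicType *sig_bt,
                                              VMRegPair *regs,
                                              int total_args_passed) {
      int int_args = 0;
      int fp_args  = 0;
      for (int i = 0; i < total_args_passed; i++) {
        switch (sig_bt[i]) {
        case T_BOOLEAN: case T_CHAR: case T_BYTE: case T_SHORT:
        case T_INT: case T_OBJECT: case T_ARRAY: case T_ADDRESS:
          if (int_args++ > 0) return -1;                    // only one integer return register
          regs[i].set1(rax->as_VMReg());
          break;
        case T_LONG:
          assert(sig_bt[i + 1] == T_VOID, "missing half");
          if (int_args++ > 0) return -1;
          regs[i].set_pair(rdx->as_VMReg(), rax->as_VMReg());
          break;
        case T_FLOAT:
          if (fp_args++ > 0) return -1;                     // only one FP return register
          regs[i].set1(xmm0->as_VMReg());
          break;
        case T_DOUBLE:
          assert(sig_bt[i + 1] == T_VOID, "missing half");
          if (fp_args++ > 0) return -1;
          regs[i].set2(xmm0->as_VMReg());
          break;
        case T_VOID:
          regs[i].set_bad();
          break;
        default:
          ShouldNotReachHere();
        }
      }
      return int_args + fp_args;
    }
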
// Patch the callers callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
Label L;
__ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
__ jcc(Assembler::equal, L);
int next_off = st_off - Interpreter::stackElementSize;
__ movdbl(Address(rsp, next_off), r);
}
static void gen_c2i_adapter(MacroAssembler *masm,
-                             int total_args_passed,
-                             int comp_args_on_stack,
-                             const BasicType *sig_bt,
+                             const GrowableArray<SigEntry>& sig_extended,
                              const VMRegPair *regs,
-                             Label& skip_fixup) {
+                             Label& skip_fixup,
+                             address start,
+                             OopMapSet*& oop_maps,
+                             int& frame_complete,
+                             int& frame_size_in_words) {
// Before we get into the guts of the C2I adapter, see if we should be here
// at all. We've come from compiled code and are attempting to jump to the
// interpreter, which means the caller made a static call to get here
// (vcalls always get a compiled target if there is one). Check for a
// compiled target. If there is one, we need to patch the caller's call.
#endif /* COMPILER2 */
// Since all args are passed on the stack, total_args_passed * interpreter_
// stack_element_size is the space we need.
- int extraspace = total_args_passed * Interpreter::stackElementSize;
+ int extraspace = sig_extended.length() * Interpreter::stackElementSize;
// Get return address
__ pop(rax);
// set senderSP value
__ movptr(rsi, rsp);
__ subptr(rsp, extraspace);
// Now write the args into the outgoing interpreter space
- for (int i = 0; i < total_args_passed; i++) {
- if (sig_bt[i] == T_VOID) {
- assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
+ for (int i = 0; i < sig_extended.length(); i++) {
+ if (sig_extended.at(i)._bt == T_VOID) {
+ assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
continue;
}
// st_off points to lowest address on stack.
- int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
+ int st_off = ((sig_extended.length() - 1) - i) * Interpreter::stackElementSize;
int next_off = st_off - Interpreter::stackElementSize;
// Say 4 args:
// i st_off
// 0 12 T_LONG
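
The table in the comment above is cut short by the skipped context. As a stand-alone check of the same offset arithmetic, assuming ia32's 4-byte Interpreter::stackElementSize and a made-up signature of a long (plus its T_VOID half) followed by an int:

    // Illustration only, not HotSpot code: prints the interpreter stack offsets
    // that gen_c2i_adapter computes for each extended-signature entry.
    #include <cstdio>

    int main() {
      const int stackElementSize = 4;                       // ia32 interpreter slot size
      const char* sig[] = { "T_LONG", "T_VOID", "T_INT" };  // long + its half + int
      const int total = sizeof(sig) / sizeof(sig[0]);
      for (int i = 0; i < total; i++) {
        int st_off   = ((total - 1) - i) * stackElementSize; // first arg gets the highest offset
        int next_off = st_off - stackElementSize;            // second word of a long/double
        std::printf("i=%d %-6s st_off=%2d next_off=%2d\n", i, sig[i], st_off, next_off);
      }
      return 0;
    }
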
} else {
assert(r_1->is_XMMRegister(), "");
if (!r_2->is_valid()) {
__ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
} else {
- assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
+ assert(sig_extended.at(i)._bt == T_DOUBLE || sig_extended.at(i)._bt == T_LONG, "wrong type");
move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
}
}
}
__ jcc(Assembler::below, L_ok);
__ bind(L_fail);
}
void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
-                                     int total_args_passed,
                                      int comp_args_on_stack,
-                                     const BasicType *sig_bt,
+                                     const GrowableArray<SigEntry>& sig_extended,
                                      const VMRegPair *regs) {
+
// Note: rsi contains the senderSP on entry. We must preserve it since
// we may do a i2c -> c2i transition if we lose a race where compiled
// code goes non-entrant while we get args ready.
// Adapters can be frameless because they do not require the caller
// Pre-load the register-jump target early, to schedule it better.
__ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
// Now generate the shuffle code. Pick up all register args and move the
// rest through the floating point stack top.
- for (int i = 0; i < total_args_passed; i++) {
- if (sig_bt[i] == T_VOID) {
+ for (int i = 0; i < sig_extended.length(); i++) {
+ if (sig_extended.at(i)._bt == T_VOID) {
// Longs and doubles are passed in native word order, but misaligned
// in the 32-bit build.
- assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
+ assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
continue;
}
// Pick up 0, 1 or 2 words from SP+offset.
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
// Load in argument order going down.
- int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
+ int ld_off = (sig_extended.length() - i) * Interpreter::stackElementSize;
// Point to interpreter value (vs. tag)
int next_off = ld_off - Interpreter::stackElementSize;
//
//
//
__ jmp(rdi);
}
// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
-                                                             int total_args_passed,
                                                              int comp_args_on_stack,
-                                                             const BasicType *sig_bt,
+                                                             const GrowableArray<SigEntry>& sig_extended,
                                                              const VMRegPair *regs,
-                                                             AdapterFingerPrint* fingerprint) {
+                                                             AdapterFingerPrint* fingerprint,
+                                                             AdapterBlob*& new_adapter) {
address i2c_entry = __ pc();
- gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
+ gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);
// -------------------------------------------------------------------------
// Generate a C2I adapter. On entry we know rbx, holds the Method* during calls
// to the interpreter. The args start out packed in the compiled layout. They
// need to be unpacked into the interpreter layout. This will almost always
address c2i_entry = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
- gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
+ OopMapSet* oop_maps = nullptr;
+ int frame_complete = CodeOffsets::frame_never_safe;
+ int frame_size_in_words = 0;
+ gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);
+ new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
// return the blob
// frame_size_words or bytes??
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
+ BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
+   Unimplemented();
+   return nullptr;
+ }
+
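
generate_buffered_inline_type_adapter is likewise left unimplemented on x86_32. As background (not something this change adds), "buffering" an inline type means allocating a heap instance and copying the scalarized field values into it, so code that needs a reference can get one. A plain C++ analogy of that packing step, with made-up names:

    // Analogy only, not HotSpot code: fields arrive "scalarized" as separate
    // arguments and are packed into a freshly allocated boxed instance.
    #include <cstdint>
    #include <memory>

    struct PointValue {                // stand-in for an inline klass layout
      int32_t x;
      int32_t y;
    };

    std::unique_ptr<PointValue> buffer_point(int32_t x, int32_t y) {
      auto boxed = std::make_unique<PointValue>();
      boxed->x = x;                    // store each field into the buffer
      boxed->y = y;
      return boxed;                    // hand out a reference to the buffered value
    }
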
//------------------------------------------------------------------------------------------------------------------------
// Continuation point for throwing of implicit exceptions that are not handled in
// the current activation. Fabricates an exception oop and initiates normal
// exception dispatching in this frame.
//