src/hotspot/share/opto/output.cpp
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
+ #include "gc/shared/gc_globals.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/allocation.hpp"
#include "opto/ad.hpp"
#include "opto/block.hpp"
_buf_sizes(),
_block(nullptr),
_index(0) {
C->set_output(this);
if (C->stub_name() == nullptr) {
! _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
}
}
PhaseOutput::~PhaseOutput() {
C->set_output(nullptr);
_buf_sizes(),
_block(nullptr),
_index(0) {
C->set_output(this);
if (C->stub_name() == nullptr) {
! int fixed_slots = C->fixed_slots();
+ if (C->needs_stack_repair()) {
+ fixed_slots -= 2;
+ }
+ // TODO 8284443 Only reserve the extra slots if needed
+ if (InlineTypeReturnedAsFields) {
+ fixed_slots -= 2;
+ }
+ _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
}
}
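// A minimal sketch (not part of the patch) of the slot arithmetic above,
// assuming a 64-bit VM where sizeof(address) == 8 and
// VMRegImpl::stack_slot_size == 4:
//
//   int pc_slots = sizeof(address) / VMRegImpl::stack_slot_size;  // == 2
//   // fixed_slots, minus 2 slots for stack repair, minus 2 slots for
//   // InlineTypeReturnedAsFields, minus pc_slots for the saved PC:
//   // e.g. fixed_slots == 10  =>  _orig_pc_slot == 10 - 2 - 2 - 2 == 4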
PhaseOutput::~PhaseOutput() {
C->set_output(nullptr);
Block *broot = C->cfg()->get_root_block();
const StartNode *start = entry->head()->as_Start();
// Replace StartNode with prolog
! MachPrologNode *prolog = new MachPrologNode();
entry->map_node(prolog, 0);
C->cfg()->map_node_to_block(prolog, entry);
C->cfg()->unmap_node_from_block(start); // start is no longer in any block
// Virtual methods need an unverified entry point
!
! if( C->is_osr_compilation() ) {
- if( PoisonOSREntry ) {
// TODO: Should use a ShouldNotReachHereNode...
C->cfg()->insert( broot, 0, new MachBreakpointNode() );
}
} else {
! if( C->method() && !C->method()->flags().is_static() ) {
! // Insert unverified entry point
! C->cfg()->insert( broot, 0, new MachUEPNode() );
}
-
}
// Break before main entry point
if ((C->method() && C->directive()->BreakAtExecuteOption) ||
(OptoBreakpoint && C->is_method_compilation()) ||
Block *broot = C->cfg()->get_root_block();
const StartNode *start = entry->head()->as_Start();
// Replace StartNode with prolog
! Label verified_entry;
+ MachPrologNode* prolog = new MachPrologNode(&verified_entry);
entry->map_node(prolog, 0);
C->cfg()->map_node_to_block(prolog, entry);
C->cfg()->unmap_node_from_block(start); // start is no longer in any block
// Virtual methods need an unverified entry point
! if (C->is_osr_compilation()) {
! if (PoisonOSREntry) {
// TODO: Should use a ShouldNotReachHereNode...
C->cfg()->insert( broot, 0, new MachBreakpointNode() );
}
} else {
! if (C->method()) {
! if (C->method()->has_scalarized_args()) {
! // Add entry point to unpack all inline type arguments
+ C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
+ if (!C->method()->is_static()) {
+ // Add verified/unverified entry points that only unpack the inline type receiver (used at interface calls)
+ C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
+ C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ true));
+ C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
+ }
+ } else if (!C->method()->is_static()) {
+ // Insert unverified entry point
+ C->cfg()->insert(broot, 0, new MachUEPNode());
+ }
}
}
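// Because every MachVEPNode above is inserted at index 0, the last insert
// ends up first: the block opens with the unverified receiver-only entry
// (CodeOffsets::Entry), then the verified receiver-only entry
// (Verified_Inline_Entry_RO), the unverified full-unpack entry
// (Inline_Entry), and finally the verified full-unpack entry
// (Verified_Inline_Entry), matching the offset bookkeeping done after
// shorten_branches() below.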
// Break before main entry point
if ((C->method() && C->directive()->BreakAtExecuteOption) ||
(OptoBreakpoint && C->is_method_compilation()) ||
// Must be done before ScheduleAndBundle due to SPARC delay slots
uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
blk_starts[0] = 0;
shorten_branches(blk_starts);
+ if (!C->is_osr_compilation() && C->has_scalarized_args()) {
+ // Compute the offsets of the entry points required by the inline type calling convention
+ if (!C->method()->is_static()) {
+ // We have entry points at the beginning of the method, implemented by the first 4 nodes.
+ // Entry (unverified) @ offset 0
+ // Verified_Inline_Entry_RO
+ // Inline_Entry (unverified)
+ // Verified_Inline_Entry
+ uint offset = 0;
+ _code_offsets.set_value(CodeOffsets::Entry, offset);
+
+ offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
+ _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
+
+ offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
+ _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
+
+ offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
+ _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
+ } else {
+ _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
+ _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
+ }
+ }
+
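// Illustrative layout implied by the bookkeeping above (actual offsets
// depend on the emitted MachVEPNode sizes; the numbers here are made up):
//
//   _code_offsets.value(CodeOffsets::Entry)                     // == 0
//   _code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO)  // e.g. 16
//   _code_offsets.value(CodeOffsets::Inline_Entry)              // e.g. 32
//   _code_offsets.value(CodeOffsets::Verified_Inline_Entry)     // e.g. 48
//
// i.e. the four entry points sit back-to-back at the start of the method.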
ScheduleAndBundle();
if (C->failing()) {
return;
}
reloc_size += CallStubImpl::reloc_call_trampoline();
MachCallNode *mcall = mach->as_MachCall();
// This destination address is NOT PC-relative
! mcall->method_set((intptr_t)mcall->entry_point());
if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
stub_size += CompiledDirectCall::to_interp_stub_size();
reloc_size += CompiledDirectCall::reloc_to_interp_stub();
}
reloc_size += CallStubImpl::reloc_call_trampoline();
MachCallNode *mcall = mach->as_MachCall();
// This destination address is NOT PC-relative
! if (mcall->entry_point() != nullptr) {
+ mcall->method_set((intptr_t)mcall->entry_point());
+ }
if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
stub_size += CompiledDirectCall::to_interp_stub_size();
reloc_size += CompiledDirectCall::reloc_to_interp_stub();
}
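// The new null check implies that entry_point() can legitimately be null
// here for some calls, presumably until the final target is resolved; the
// same guard recurs below where calls are emitted and return addresses are
// recorded.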
ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
if (sv == nullptr) {
ciKlass* cik = t->is_oopptr()->exact_klass();
assert(cik->is_instance_klass() ||
cik->is_array_klass(), "Not supported allocation.");
sv = new ObjectValue(spobj->_idx,
! new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
set_sv_for_object_node(objs, sv);
- uint first_ind = spobj->first_index(sfpt->jvms());
for (uint i = 0; i < spobj->n_fields(); i++) {
Node* fld_node = sfpt->in(first_ind+i);
(void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
}
}
ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
if (sv == nullptr) {
ciKlass* cik = t->is_oopptr()->exact_klass();
assert(cik->is_instance_klass() ||
cik->is_array_klass(), "Not supported allocation.");
+ uint first_ind = spobj->first_index(sfpt->jvms());
+ // Nullable, scalarized inline types have an is_init input
+ // that needs to be checked before using the field values.
+ ScopeValue* is_init = nullptr;
+ if (cik->is_inlinetype()) {
+ Node* init_node = sfpt->in(first_ind++);
+ assert(init_node != nullptr, "is_init node not found");
+ if (!init_node->is_top()) {
+ const TypeInt* init_type = init_node->bottom_type()->is_int();
+ if (init_node->is_Con()) {
+ is_init = new ConstantIntValue(init_type->get_con());
+ } else {
+ OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
+ is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
+ }
+ }
+ }
sv = new ObjectValue(spobj->_idx,
! new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, is_init);
set_sv_for_object_node(objs, sv);
for (uint i = 0; i < spobj->n_fields(); i++) {
Node* fld_node = sfpt->in(first_ind+i);
(void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
}
}
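// Safepoint input layout for a nullable, scalarized inline type, as implied
// by the code above (first_ind was advanced past the is_init input):
//
//   sfpt->in(first_ind - 1)   is_init flag (constant or register location)
//   sfpt->in(first_ind)       first field value
//   ...                       field values up to first_ind + n_fields() - 1
//
// Presumably deoptimization rematerializes the object only when is_init is
// non-zero and otherwise restores a null reference.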
MachCallNode *mcall;
int safepoint_pc_offset = current_offset;
bool is_method_handle_invoke = false;
bool return_oop = false;
+ bool return_scalarized = false;
bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
bool arg_escape = false;
// Add the safepoint in the DebugInfoRecorder
if( !mach->is_MachCall() ) {
}
arg_escape = mcall->as_MachCallJava()->_arg_escape;
}
// Check if a call returns an object.
! if (mcall->returns_pointer()) {
return_oop = true;
}
safepoint_pc_offset += mcall->ret_addr_offset();
C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
}
// Loop over the JVMState list to add scope information
}
arg_escape = mcall->as_MachCallJava()->_arg_escape;
}
// Check if a call returns an object.
! if (mcall->returns_pointer() || mcall->returns_scalarized()) {
return_oop = true;
}
+ if (mcall->returns_scalarized()) {
+ return_scalarized = true;
+ }
safepoint_pc_offset += mcall->ret_addr_offset();
C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
}
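// A scalarized return also counts as an oop return above, presumably because
// the return register may still hold an oop (e.g. the buffered inline type),
// while return_scalarized additionally marks that the value may be returned
// as individual fields in registers.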
// Loop over the JVMState list to add scope information
jvms->bci(),
jvms->should_reexecute(),
rethrow_exception,
is_method_handle_invoke,
return_oop,
+ return_scalarized,
has_ea_local_in_scope,
arg_escape,
locvals,
expvals,
monvals
bool observe_safepoint = is_sfn;
// Remember the start of the last call in a basic block
if (is_mcall) {
MachCallNode *mcall = mach->as_MachCall();
! // This destination address is NOT PC-relative
! mcall->method_set((intptr_t)mcall->entry_point());
// Save the return address
call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
observe_safepoint = mcall->guaranteed_safepoint();
bool observe_safepoint = is_sfn;
// Remember the start of the last call in a basic block
if (is_mcall) {
MachCallNode *mcall = mach->as_MachCall();
! if (mcall->entry_point() != nullptr) {
! // This destination address is NOT PC-relative
+ mcall->method_set((intptr_t)mcall->entry_point());
+ }
// Save the return address
call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
observe_safepoint = mcall->guaranteed_safepoint();
return;
}
assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
"ret_addr_offset() not within emitted code");
-
#ifdef ASSERT
uint n_size = n->size(C->regalloc());
if (n_size < (current_offset-instr_offset)) {
MachNode* mach = n->as_Mach();
n->dump();
t->is_ptr()->offset() != 0 ) {
last_safept_node->add_prec( m );
break;
}
}
+
+ // Do not allow a CheckCastPP node whose input is a raw pointer to
+ // float past a safepoint. This can occur when a buffered inline
+ // type is allocated in a loop and the CheckCastPP from that
+ // allocation is reused outside the loop. If the use inside the
+ // loop is scalarized, the CheckCastPP will no longer be connected
+ // to the loop safepoint. See JDK-8264340.
+ if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
+ Node *def = m->in(1);
+ if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
+ last_safept_node->add_prec(m);
+ }
+ }
}
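// Rough shape of the JDK-8264340 pattern the new precedence edge guards
// against (pseudocode, not from the patch):
//
//   for (...) {
//     raw = allocate_buffered_inline_type();  // raw-pointer allocation
//     obj = CheckCastPP(raw);                 // reused outside the loop
//     safepoint();                            // in-loop use was scalarized away
//   }
//   use(obj);  // CheckCastPP must not float above the safepoint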
if( n->jvms() ) { // Precedence edge from derived to safept
// Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
if( b->get_node(last_safept) != last_safept_node ) {
}
ResourceMark rm;
_scratch_const_size = const_size;
int size = C2Compiler::initial_code_buffer_size(const_size);
+ if (C->has_scalarized_args()) {
+ // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
+ // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
+ ciMethod* method = C->method();
+ int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
+ int arg_num = 0;
+ if (!method->is_static()) {
+ if (method->is_scalarized_arg(arg_num)) {
+ size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
+ }
+ arg_num++;
+ }
+ for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
+ if (method->is_scalarized_arg(arg_num)) {
+ size += str.type()->as_inline_klass()->oop_count() * barrier_size;
+ }
+ arg_num++;
+ }
+ }
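// Worked example of the sizing above (illustrative numbers): a non-static
// method whose scalarized receiver holds 3 oop fields grows the scratch
// buffer by 3 * 200 == 600 bytes under ZGC, or by 3 * 7 == 21 bytes in a
// product build without ZGC (plus 3 * 37 bytes of verification code in
// debug builds).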
blob = BufferBlob::create("Compile::scratch_buffer", size);
// Record the buffer blob for next time.
set_scratch_buffer_blob(blob);
// Have we run out of code space?
if (scratch_buffer_blob() == nullptr) {
n->emit(&masm, C->regalloc());
// Emitting into the scratch buffer should not fail
assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
! if (is_branch) // Restore label.
n->as_MachBranch()->label_set(saveL, save_bnum);
// End scratch_emit_size section.
set_in_scratch_emit_size(false);
return buf.insts_size();
n->emit(&masm, C->regalloc());
// Emitting into the scratch buffer should not fail
assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
! // Restore label.
+ if (is_branch) {
n->as_MachBranch()->label_set(saveL, save_bnum);
+ }
// End scratch_emit_size section.
set_in_scratch_emit_size(false);
return buf.insts_size();
if (C->is_osr_compilation()) {
_code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
_code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
} else {
- if (!target->is_static()) {
- // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
- // before the inline cache check, so we don't have to execute any nop instructions when dispatching
- // through the UEP, yet we can ensure that the VEP is aligned appropriately.
- _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
- }
_code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
_code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
}
C->env()->register_method(target,
! entry_bci,
! &_code_offsets,
! _orig_pc_slot_offset_in_bytes,
! code_buffer(),
! frame_size_in_words(),
! oop_map_set(),
! &_handler_table,
! inc_table(),
! compiler,
! has_unsafe_access,
! SharedRuntime::is_wide_vector(C->max_vector_size()),
! C->has_monitors(),
! C->has_scoped_access(),
! 0);
if (C->log() != nullptr) { // Print code cache state into compiler log
C->log()->code_cache_state();
}
}
if (C->is_osr_compilation()) {
_code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
_code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
} else {
_code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
+ if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
+ _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
+ }
+ if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
+ _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
+ }
+ if (_code_offsets.value(CodeOffsets::Entry) == -1) {
+ _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
+ }
_code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
}
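// This resolves the "-1, will be patched later" sentinels set during
// shorten_branches(): any inline-type entry offset still unset at this
// point collapses onto the regular verified entry at _first_block_size.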
C->env()->register_method(target,
! entry_bci,
! &_code_offsets,
! _orig_pc_slot_offset_in_bytes,
! code_buffer(),
! frame_size_in_words(),
! _oop_map_set,
! &_handler_table,
! inc_table(),
! compiler,
! has_unsafe_access,
! SharedRuntime::is_wide_vector(C->max_vector_size()),
! C->has_monitors(),
! C->has_scoped_access(),
! 0);
if (C->log() != nullptr) { // Print code cache state into compiler log
C->log()->code_cache_state();
}
}