src/hotspot/share/opto/graphKit.cpp
// Update the two tail pointers in parallel.
out_jvms = out_jvms->caller();
in_jvms = in_jvms->caller();
}
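+ // PEA: replace any debug edges that still refer to virtual objects with the
+ // materialized values recorded in the youngest JVMState's allocation state.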
+ PEAState& as = youngest_jvms->alloc_state();
+ backfill_materialized(call, TypeFunc::Parms, call->req(), as);
assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
// Test the correctness of JVMState::debug_xxx accessors:
assert(call->jvms()->debug_start() == non_debug_edges, "");
assert(call->jvms()->debug_end() == call->req(), "");
}
#endif
return flock;
}
+ // Clone a LockNode for PEA materialization.
+ // A LockNode is a safepoint, so it is location-sensitive; we can't simply clone it.
+ // Instead, we create a new one using the current JVMState and mark the bytecode
+ // non-reexecutable.
+ //
+ // It is worth noting that we reuse the BoxLockNode: it represents the monitor slot
+ // on the stack. PhaseMacroExpand::mark_eliminated_box() can't eliminate a BoxLockNode
+ // and its associated AbstractLockNodes until it is proven that the 'eliminated'
+ // BoxLockNode is exclusive.
+ void GraphKit::clone_shared_lock(Node* box, Node* obj) {
+ kill_dead_locals();
+
+ Node* mem = reset_memory();
+
+ FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
+ const TypeFunc *tf = LockNode::lock_type();
+ LockNode *lock = new LockNode(C, tf);
+
+ lock->set_req(TypeFunc::Control, control());
+ lock->set_req(TypeFunc::Memory , mem);
+ lock->set_req(TypeFunc::I_O , top()) ; // does no i/o
+ lock->set_req(TypeFunc::FramePtr, frameptr());
+ lock->set_req(TypeFunc::ReturnAdr, top());
+ lock->set_req(TypeFunc::Parms + 0, obj);
+ lock->set_req(TypeFunc::Parms + 1, box);
+ lock->set_req(TypeFunc::Parms + 2, flock);
+
+ // We can't re-execute the current bytecode: it's not a monitorenter.
+ jvms()->set_should_reexecute(false);
+ add_safepoint_edges(lock);
+
+ lock = _gvn.transform( lock )->as_Lock();
+
+ // lock has no side-effects, sets few values
+ set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);
+
+ insert_mem_bar(Op_MemBarAcquireLock);
+
+ // Add this to the worklist so that the lock can be eliminated
+ record_for_igvn(lock);
+ }
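+ // A minimal usage sketch (hypothetical call site; the real caller lives in the
+ // PEA materialization code): after materializing 'objx', re-establish the lock
+ // on it, reusing the BoxLockNode of the original monitor. 'depth' is an assumed
+ // monitor index:
+ //
+ //   Node* box = map()->monitor_box(jvms(), depth);  // existing BoxLockNode
+ //   clone_shared_lock(box, objx);                   // lock the materialized object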
//------------------------------shared_unlock----------------------------------
// Emit unlocking code.
- void GraphKit::shared_unlock(Node* box, Node* obj) {
+ void GraphKit::shared_unlock(Node* box, Node* obj, bool preserve_monitor) {
// bci is either a monitorenter bc or InvocationEntryBci
// %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
assert(SynchronizationEntryBCI == InvocationEntryBci, "");
if( !GenerateSynchronizationCode )
return;
if (stopped()) { // Dead monitor?
- map()->pop_monitor(); // Kill monitor from debug info
+ if (!preserve_monitor) {
+ map()->pop_monitor(); // Kill monitor from debug info
+ }
return;
}
// Memory barrier to avoid floating things down past the locked region
insert_mem_bar(Op_MemBarReleaseLock);
Node* mem = reset_memory();
// unlock has no side-effects, sets few values
set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
-
- // Kill monitor from debug info
- map()->pop_monitor( );
+ if (!preserve_monitor) {
+ // Kill monitor from debug info
+ map()->pop_monitor();
+ }
}
//-------------------------------get_layout_helper-----------------------------
// If the given klass is a constant or known to be an array,
// fetch the constant layout helper value into constant_value
Node* prevmem = kit.memory(alias_idx);
init_in_merge->set_memory_at(alias_idx, prevmem);
kit.set_memory(init_out_raw, alias_idx);
}
-
- //---------------------------set_output_for_allocation-------------------------
- Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
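+ //----------------------set_output_for_allocation_common-----------------------
+ // The shared tail of set_output_for_allocation(): transform the AllocateNode and
+ // wire up its projections and slow-path exception state. It does not add safepoint
+ // edges, which lets materialize_object() supply a JVMState of its own.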
+ Node* GraphKit::set_output_for_allocation_common(AllocateNode* alloc,
const TypeOopPtr* oop_type,
bool deoptimize_on_exception) {
int rawidx = Compile::AliasIdxRaw;
- alloc->set_req( TypeFunc::FramePtr, frameptr() );
- add_safepoint_edges(alloc);
Node* allocx = _gvn.transform(alloc);
set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
// create memory projection for i_o
set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
#endif //ASSERT
return javaoop;
}
+ //---------------------------set_output_for_allocation-------------------------
+ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
+ const TypeOopPtr* oop_type,
+ bool deoptimize_on_exception) {
+ alloc->set_req( TypeFunc::FramePtr, frameptr() );
+ add_safepoint_edges(alloc);
+ return set_output_for_allocation_common(alloc, oop_type, deoptimize_on_exception);
+ }
+
+ //
+ // Position-Agnostic Materialization
+ // ---------------------------------
+ // When PEA materializes a virtual object, it emits a cluster of nodes at the
+ // current position. Unlike ordinary "floating" nodes, an AllocateNode is a
+ // subclass of SafePointNode and therefore depends on a JVMState; the JVMState
+ // at the current position may not be suitable for the AllocateNode.
+ //
+ // To embed the nodes safely at the current position, we take two measures:
+ //
+ // 1. The debug edges and JVMState of the cloned AllocateNode are not taken from
+ //    the current GraphKit; we copy them from the original AllocateNode instead.
+ //
+ // 2. We choose deoptimization on exception. Otherwise, a real exception might be
+ //    dispatched to the wrong exception handler in the current context.
+ //
+ Node* GraphKit::materialize_object(AllocateNode* alloc, const TypeOopPtr* oop_type) {
+ Node *mem = reset_memory();
+ AllocateNode* allocx = new AllocateNode(C, alloc->tf(), control(), mem, i_o(),
+ alloc->in(AllocateNode::AllocSize),
+ alloc->in(AllocateNode::KlassNode),
+ alloc->in(AllocateNode::InitialTest));
+ allocx->set_req(TypeFunc::FramePtr, frameptr());
+
+ JVMState* out_jvms = alloc->jvms()->clone_shallow(C);
+ out_jvms->bind_map(allocx);
+ // copy all debuginfo edges from the original AllocateNode
+ for (uint i = allocx->req(); i < alloc->req(); ++i) {
+ allocx->add_req(alloc->in(i));
+ }
+
+ JVMState* jvms = sync_jvms();
+ // We cannot use PreserveJVMState here because 'this' is a Parse; we would fail in jvms_in_sync().
+ GraphKit kit(allocx->jvms());
+ kit.set_map_clone(allocx);
+
+ Node* objx = kit.set_output_for_allocation_common(allocx, oop_type, true /*deoptimize_on_ex*/);
+
+ // copy back compile-time state to 'this'.
+ set_jvms(jvms);
+ set_control(kit.control());
+ set_i_o(kit.i_o());
+ set_all_memory(kit.merged_memory());
+
+ return objx;
+ }
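+ // A minimal usage sketch (hypothetical; the real escape-point handling lives in
+ // the PEA pass): when a tracked virtual object escapes, clone its allocation at
+ // the current position and record the result, so that the PEAState queried in
+ // backfill_materialized() below can return it:
+ //
+ //   const TypeOopPtr* oop_type = _gvn.type(alloc->result_cast())->is_oopptr();
+ //   Node* objx = materialize_object(alloc, oop_type);
+ //   // ... record objx so that as.get_materialized_value(alloc) returns it ...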
+
//---------------------------new_instance--------------------------------------
// This routine takes a klass_node which may be constant (for a static type)
// or may be non-constant (for reflective code). It will work equally well
// for either, and the graph will fold nicely if the optimizer later reduces
// the type to a constant.
if (con_type != nullptr) {
return makecon(con_type);
}
return nullptr;
}
+
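+ //---------------------------backfill_materialized-----------------------------
+ // Scan the debug edges of 'map' in [begin, end) and replace each CheckCastPP
+ // whose allocation is tracked in 'as' with its materialized value, if any.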
+ void GraphKit::backfill_materialized(SafePointNode* map, uint begin, uint end, PEAState& as) {
+ bool printed = false;
+
+ for (uint i = begin; i < end; ++i) {
+ Node* t = map->in(i);
+
+ if (t != nullptr && t->is_CheckCastPP()) {
+ AllocateNode* alloc = AllocateNode::Ideal_allocation(t);
+
+ if (as.contains(alloc)) {
+ Node* neww = as.get_materialized_value(alloc);
+ if (neww != nullptr && neww != t) {
+ #ifndef PRODUCT
+ if (PEAVerbose) {
+ if (!printed) {
+ map->dump();
+ printed = true;
+ }
+ tty->print_cr("[PEA] replace input %d with node %d", i, neww->_idx);
+ }
+ #endif
+ map->set_req(i, neww);
+ }
+ }
+ }
+ }
+ }
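+ // Note: the caller visible in this change is add_safepoint_edges() (see the hunk
+ // above), which backfills the debug edges of each newly created call/safepoint.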