
src/share/vm/opto/memnode.cpp

*** 36,45 ****
--- 36,50 ----
  #include "opto/matcher.hpp"
  #include "opto/memnode.hpp"
  #include "opto/mulnode.hpp"
  #include "opto/phaseX.hpp"
  #include "opto/regmask.hpp"
+ #if INCLUDE_ALL_GCS
+ #include "gc_implementation/shenandoah/shenandoahForwarding.hpp"
+ #include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+ #include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
+ #endif
  
  // Portions of code courtesy of Clifford Click
  
  // Optimization - Graph Style
*** 670,889 ****
  #endif
      return tp;
    }
  }
  
- //------------------------adr_phi_is_loop_invariant----------------------------
- // A helper function for Ideal_DU_postCCP to check if a Phi in a counted
- // loop is loop invariant. Make a quick traversal of Phi and associated
- // CastPP nodes, looking to see if they are a closed group within the loop.
- bool MemNode::adr_phi_is_loop_invariant(Node* adr_phi, Node* cast) {
-   // The idea is that the phi-nest must boil down to only CastPP nodes
-   // with the same data. This implies that any path into the loop already
-   // includes such a CastPP, and so the original cast, whatever its input,
-   // must be covered by an equivalent cast, with an earlier control input.
-   ResourceMark rm;
- 
-   // The loop entry input of the phi should be the unique dominating
-   // node for every Phi/CastPP in the loop.
-   Unique_Node_List closure;
-   closure.push(adr_phi->in(LoopNode::EntryControl));
- 
-   // Add the phi node and the cast to the worklist.
-   Unique_Node_List worklist;
-   worklist.push(adr_phi);
-   if( cast != NULL ){
-     if( !cast->is_ConstraintCast() ) return false;
-     worklist.push(cast);
-   }
- 
-   // Begin recursive walk of phi nodes.
-   while( worklist.size() ){
-     // Take a node off the worklist
-     Node *n = worklist.pop();
-     if( !closure.member(n) ){
-       // Add it to the closure.
-       closure.push(n);
-       // Make a sanity check to ensure we don't waste too much time here.
-       if( closure.size() > 20) return false;
-       // This node is OK if:
-       //  - it is a cast of an identical value
-       //  - or it is a phi node (then we add its inputs to the worklist)
-       // Otherwise, the node is not OK, and we presume the cast is not invariant
-       if( n->is_ConstraintCast() ){
-         worklist.push(n->in(1));
-       } else if( n->is_Phi() ) {
-         for( uint i = 1; i < n->req(); i++ ) {
-           worklist.push(n->in(i));
-         }
-       } else {
-         return false;
-       }
-     }
-   }
- 
-   // Quit when the worklist is empty, and we've found no offending nodes.
-   return true;
- }
- 
- //------------------------------Ideal_DU_postCCP-------------------------------
- // Find any cast-away of null-ness and keep its control.  Null cast-aways are
- // going away in this pass and we need to make this memory op depend on the
- // gating null check.
- Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
-   return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address));
- }
- 
- // I tried to leave the CastPP's in.  This makes the graph more accurate in
- // some sense; we get to keep around the knowledge that an oop is not-null
- // after some test.  Alas, the CastPP's interfere with GVN (some values are
- // the regular oop, some are the CastPP of the oop, all merge at Phi's which
- // cannot collapse, etc).  This cost us 10% on SpecJVM, even when I removed
- // some of the more trivial cases in the optimizer.  Removing more useless
- // Phi's started allowing Loads to illegally float above null checks.  I gave
- // up on this approach.  CNC 10/20/2000
- // This static method may be called not from MemNode (EncodePNode calls it).
- // Only the control edge of the node 'n' might be updated.
- Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
-   Node *skipped_cast = NULL;
-   // Need a null check?  Regular static accesses do not because they are
-   // from constant addresses.  Array ops are gated by the range check (which
-   // always includes a NULL check).  Just check field ops.
-   if( n->in(MemNode::Control) == NULL ) {
-     // Scan upwards for the highest location we can place this memory op.
-     while( true ) {
-       switch( adr->Opcode() ) {
- 
-       case Op_AddP:             // No change to NULL-ness, so peek thru AddP's
-         adr = adr->in(AddPNode::Base);
-         continue;
- 
-       case Op_DecodeN:          // No change to NULL-ness, so peek thru
-       case Op_DecodeNKlass:
-         adr = adr->in(1);
-         continue;
- 
-       case Op_EncodeP:
-       case Op_EncodePKlass:
-         // EncodeP node's control edge could be set by this method
-         // when EncodeP node depends on CastPP node.
-         //
-         // Use its control edge for memory op because EncodeP may go away
-         // later when it is folded with following or preceding DecodeN node.
-         if (adr->in(0) == NULL) {
-           // Keep looking for cast nodes.
-           adr = adr->in(1);
-           continue;
-         }
-         ccp->hash_delete(n);
-         n->set_req(MemNode::Control, adr->in(0));
-         ccp->hash_insert(n);
-         return n;
- 
-       case Op_CastPP:
-         // If the CastPP is useless, just peek on through it.
-         if( ccp->type(adr) == ccp->type(adr->in(1)) ) {
-           // Remember the cast that we've peeked though. If we peek
-           // through more than one, then we end up remembering the highest
-           // one, that is, if in a loop, the one closest to the top.
-           skipped_cast = adr;
-           adr = adr->in(1);
-           continue;
-         }
-         // CastPP is going away in this pass!  We need this memory op to be
-         // control-dependent on the test that is guarding the CastPP.
-         ccp->hash_delete(n);
-         n->set_req(MemNode::Control, adr->in(0));
-         ccp->hash_insert(n);
-         return n;
- 
-       case Op_Phi:
-         // Attempt to float above a Phi to some dominating point.
-         if (adr->in(0) != NULL && adr->in(0)->is_CountedLoop()) {
-           // If we've already peeked through a Cast (which could have set the
-           // control), we can't float above a Phi, because the skipped Cast
-           // may not be loop invariant.
-           if (adr_phi_is_loop_invariant(adr, skipped_cast)) {
-             adr = adr->in(1);
-             continue;
-           }
-         }
- 
-         // Intentional fallthrough!
- 
-         // No obvious dominating point.  The mem op is pinned below the Phi
-         // by the Phi itself.  If the Phi goes away (no true value is merged)
-         // then the mem op can float, but not indefinitely.  It must be pinned
-         // behind the controls leading to the Phi.
-       case Op_CheckCastPP:
-         // These usually stick around to change address type, however a
-         // useless one can be elided and we still need to pick up a control edge
-         if (adr->in(0) == NULL) {
-           // This CheckCastPP node has NO control and is likely useless. But we
-           // need check further up the ancestor chain for a control input to keep
-           // the node in place. 4959717.
-           skipped_cast = adr;
-           adr = adr->in(1);
-           continue;
-         }
-         ccp->hash_delete(n);
-         n->set_req(MemNode::Control, adr->in(0));
-         ccp->hash_insert(n);
-         return n;
- 
-         // List of "safe" opcodes; those that implicitly block the memory
-         // op below any null check.
-       case Op_CastX2P:          // no null checks on native pointers
-       case Op_Parm:             // 'this' pointer is not null
-       case Op_LoadP:            // Loading from within a klass
-       case Op_LoadN:            // Loading from within a klass
-       case Op_LoadKlass:        // Loading from within a klass
-       case Op_LoadNKlass:       // Loading from within a klass
-       case Op_ConP:             // Loading from a klass
-       case Op_ConN:             // Loading from a klass
-       case Op_ConNKlass:        // Loading from a klass
-       case Op_CreateEx:         // Sucking up the guts of an exception oop
-       case Op_Con:              // Reading from TLS
-       case Op_CMoveP:           // CMoveP is pinned
-       case Op_CMoveN:           // CMoveN is pinned
-         break;                  // No progress
- 
-       case Op_Proj:             // Direct call to an allocation routine
-       case Op_SCMemProj:        // Memory state from store conditional ops
- #ifdef ASSERT
-         {
-           assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value");
-           const Node* call = adr->in(0);
-           if (call->is_CallJava()) {
-             const CallJavaNode* call_java = call->as_CallJava();
-             const TypeTuple *r = call_java->tf()->range();
-             assert(r->cnt() > TypeFunc::Parms, "must return value");
-             const Type* ret_type = r->field_at(TypeFunc::Parms);
-             assert(ret_type && ret_type->isa_ptr(), "must return pointer");
-             // We further presume that this is one of
-             // new_instance_Java, new_array_Java, or
-             // the like, but do not assert for this.
-           } else if (call->is_Allocate()) {
-             // similar case to new_instance_Java, etc.
-           } else if (!call->is_CallLeaf()) {
-             // Projections from fetch_oop (OSR) are allowed as well.
-             ShouldNotReachHere();
-           }
-         }
- #endif
-         break;
-       default:
-         ShouldNotReachHere();
-       }
-       break;
-     }
-   }
- 
-   return NULL;              // No progress
- }
- 
- 
  //=============================================================================
  // Should LoadNode::Ideal() attempt to remove control edges?
  bool LoadNode::can_remove_control() const {
    return true;
  }
--- 675,684 ----
*** 1104,1113 ****
--- 899,913 ----
    // Load boxed value from result of valueOf() call is input parameter.
    if (this->is_Load() && ld_adr->is_AddP() &&
        (tp != NULL) && tp->is_ptr_to_boxed_value()) {
      intptr_t ignore = 0;
      Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
+ #if INCLUDE_ALL_GCS
+     if (UseShenandoahGC) {
+       base = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(base);
+     }
+ #endif
      if (base != NULL && base->is_Proj() &&
          base->as_Proj()->_con == TypeFunc::Parms &&
          base->in(0)->is_CallStaticJava() &&
          base->in(0)->as_CallStaticJava()->is_boxing_method()) {
        return base->in(0)->in(TypeFunc::Parms);
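
A note on the hunk above: with Shenandoah enabled, the base returned by AddPNode::Ideal_base_and_offset() may be a load-reference-barrier node wrapping the allocation, so the boxing-method pattern match only succeeds after the barrier is stripped. The sketch below is a minimal standalone model of that "look through the wrapper before matching" idiom; it is not HotSpot code, and ToyNode/step_over_barrier are invented names standing in for ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier().

  // Minimal model: pattern matching must look through an optional barrier node.
  #include <cassert>

  struct ToyNode {
    enum Kind { Allocation, LoadReferenceBarrier, Other } kind;
    ToyNode* input;  // the value a barrier wraps, if any
  };

  // If 'n' is a barrier, return the wrapped value; otherwise return 'n' itself.
  static ToyNode* step_over_barrier(ToyNode* n) {
    return (n != nullptr && n->kind == ToyNode::LoadReferenceBarrier) ? n->input : n;
  }

  int main() {
    ToyNode alloc   = { ToyNode::Allocation, nullptr };
    ToyNode barrier = { ToyNode::LoadReferenceBarrier, &alloc };
    // The allocation is only visible once the barrier has been stepped over.
    assert(step_over_barrier(&barrier)->kind == ToyNode::Allocation);
    assert(step_over_barrier(&alloc)->kind == ToyNode::Allocation);
    return 0;
  }
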
*** 1153,1164 ****
--- 953,997 ----
      // If the input to the store does not fit with the load's result type,
      // it must be truncated via an Ideal call.
      if (!phase->type(value)->higher_equal(phase->type(this)))
        return this;
    }
+   PhaseIterGVN* igvn = phase->is_IterGVN();
+   if (UseShenandoahGC &&
+       igvn != NULL &&
+       value->is_Phi() &&
+       value->req() > 2 &&
+       value->in(1) != NULL &&
+       value->in(1)->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
+     if (igvn->_worklist.member(value) ||
+         igvn->_worklist.member(value->in(0)) ||
+         (value->in(0)->in(1) != NULL &&
+          value->in(0)->in(1)->is_IfProj() &&
+          (igvn->_worklist.member(value->in(0)->in(1)) ||
+           (value->in(0)->in(1)->in(0) != NULL &&
+            igvn->_worklist.member(value->in(0)->in(1)->in(0)))))) {
+       igvn->_worklist.push(this);
+       return this;
+     }
+   }
    // (This works even when value is a Con, but LoadNode::Value
    // usually runs first, producing the singleton type of the Con.)
+   // TODO!!
+   if (false && UseShenandoahGC) {
+     Node* value_no_barrier = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(value->Opcode() == Op_EncodeP ? value->in(1) : value);
+     if (value->Opcode() == Op_EncodeP) {
+       if (value_no_barrier != value->in(1)) {
+         Node* encode = value->clone();
+         encode->set_req(1, value_no_barrier);
+         encode = phase->transform(encode);
+         return encode;
+       }
+     } else {
+       return value_no_barrier;
+     }
+   }
+ 
    return value;
  }
  
  // Search for an existing data phi which was generated before for the same
  // instance's field to avoid infinite generation of phis in a loop.
*** 1693,1705 ****
    // possible base offset.
    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
  
    // Try to constant-fold a stable array element.
!   if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
      // Make sure the reference is not into the header and the offset is constant
!     if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
        if (con_type != NULL) {
          return con_type;
        }
      }
--- 1526,1539 ----
    // possible base offset.
    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
  
    // Try to constant-fold a stable array element.
!   if (FoldStableValues && ary->is_stable()) {
      // Make sure the reference is not into the header and the offset is constant
!     ciObject* aobj = ary->const_oop();
!     if (aobj != NULL && off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
        if (con_type != NULL) {
          return con_type;
        }
      }
*** 2549,2597 ****
  Node *StoreNode::Identity( PhaseTransform *phase ) {
    Node* mem = in(MemNode::Memory);
    Node* adr = in(MemNode::Address);
    Node* val = in(MemNode::ValueIn);
  
    // Load then Store?  Then the Store is useless
    if (val->is_Load() &&
        val->in(MemNode::Address)->eqv_uncast(adr) &&
        val->in(MemNode::Memory )->eqv_uncast(mem) &&
        val->as_Load()->store_Opcode() == Opcode()) {
!     return mem;
    }
  
    // Two stores in a row of the same value?
    if (mem->is_Store() &&
        mem->in(MemNode::Address)->eqv_uncast(adr) &&
        mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
        mem->Opcode() == Opcode()) {
!     return mem;
    }
  
    // Store of zero anywhere into a freshly-allocated object?
    // Then the store is useless.
    // (It must already have been captured by the InitializeNode.)
!   if (ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
      // a newly allocated object is already all-zeroes everywhere
      if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
!       return mem;
      }
!     // the store may also apply to zero-bits in an earlier object
!     Node* prev_mem = find_previous_store(phase);
!     // Steps (a), (b): Walk past independent stores to find an exact match.
!     if (prev_mem != NULL) {
!       Node* prev_val = can_see_stored_value(prev_mem, phase);
!       if (prev_val != NULL && phase->eqv(prev_val, val)) {
!         // prev_val and val might differ by a cast; it would be good
!         // to keep the more informative of the two.
!         return mem;
        }
      }
    }
  
!   return this;
  }
  
  //------------------------------match_edge-------------------------------------
  // Do we Match on this edge index or not?  Match only memory & value
  uint StoreNode::match_edge(uint idx) const {
--- 2383,2448 ----
  Node *StoreNode::Identity( PhaseTransform *phase ) {
    Node* mem = in(MemNode::Memory);
    Node* adr = in(MemNode::Address);
    Node* val = in(MemNode::ValueIn);
  
+   Node* result = this;
+ 
    // Load then Store?  Then the Store is useless
    if (val->is_Load() &&
        val->in(MemNode::Address)->eqv_uncast(adr) &&
        val->in(MemNode::Memory )->eqv_uncast(mem) &&
        val->as_Load()->store_Opcode() == Opcode()) {
!     result = mem;
    }
  
    // Two stores in a row of the same value?
    if (mem->is_Store() &&
        mem->in(MemNode::Address)->eqv_uncast(adr) &&
        mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
        mem->Opcode() == Opcode()) {
!     result = mem;
    }
  
    // Store of zero anywhere into a freshly-allocated object?
    // Then the store is useless.
    // (It must already have been captured by the InitializeNode.)
!   if (result == this &&
!       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
      // a newly allocated object is already all-zeroes everywhere
      if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
!       result = mem;
      }
!     if (result == this) {
!       // the store may also apply to zero-bits in an earlier object
!       Node* prev_mem = find_previous_store(phase);
!       // Steps (a), (b): Walk past independent stores to find an exact match.
!       if (prev_mem != NULL) {
!         Node* prev_val = can_see_stored_value(prev_mem, phase);
!         if (prev_val != NULL && phase->eqv(prev_val, val)) {
!           // prev_val and val might differ by a cast; it would be good
!           // to keep the more informative of the two.
!           result = mem;
!         }
        }
      }
    }
  
!   if (result != this && phase->is_IterGVN() != NULL) {
!     MemBarNode* trailing = trailing_membar();
!     if (trailing != NULL) {
! #ifdef ASSERT
!       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
!       assert(t_oop == NULL || t_oop->is_known_instance_field(), "only for non escaping objects");
! #endif
!       PhaseIterGVN* igvn = phase->is_IterGVN();
!       trailing->remove(igvn);
!     }
!   }
! 
! 
    return result;
  }
  
  //------------------------------match_edge-------------------------------------
  // Do we Match on this edge index or not?  Match only memory & value
  uint StoreNode::match_edge(uint idx) const {
*** 2666,2675 ****
--- 2517,2552 ----
      }
    }
    return true;
  }
  
+ MemBarNode* StoreNode::trailing_membar() const {
+   if (is_release()) {
+     MemBarNode* trailing_mb = NULL;
+     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+       Node* u = fast_out(i);
+       if (u->is_MemBar()) {
+         if (u->as_MemBar()->trailing_store()) {
+           assert(u->Opcode() == Op_MemBarVolatile, "");
+           assert(trailing_mb == NULL, "only one");
+           trailing_mb = u->as_MemBar();
+ #ifdef ASSERT
+           Node* leading = u->as_MemBar()->leading_membar();
+           assert(leading->Opcode() == Op_MemBarRelease, "incorrect membar");
+           assert(leading->as_MemBar()->leading_store(), "incorrect membar pair");
+           assert(leading->as_MemBar()->trailing_membar() == u, "incorrect membar pair");
+ #endif
+         } else {
+           assert(u->as_MemBar()->standalone(), "");
+         }
+       }
+     }
+     return trailing_mb;
+   }
+   return NULL;
+ }
+ 
  //=============================================================================
  //------------------------------Ideal------------------------------------------
  // If the store is from an AND mask that leaves the low bits untouched, then
  // we can skip the AND operation.  If the store is from a sign-extension
  // (a left shift, then right shift) we can skip both.
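
StoreNode::trailing_membar() above relies on the new classification of memory barriers: a releasing store sits between a leading MemBarRelease and a trailing MemBarVolatile, and the trailing half is recognized among the store's users purely by its kind tag. The standalone sketch below models that lookup; it is not HotSpot code, and BarKind/ToyBar/find_trailing are invented names.

  // Toy model: find the trailing barrier among a store's users by its kind tag.
  #include <cassert>
  #include <vector>

  enum BarKind { Standalone, LeadingStore, TrailingStore };
  struct ToyBar { BarKind kind; };

  static ToyBar* find_trailing(const std::vector<ToyBar*>& users_of_store) {
    ToyBar* trailing = nullptr;
    for (ToyBar* u : users_of_store) {
      if (u->kind == TrailingStore) {
        assert(trailing == nullptr && "only one trailing membar per store");
        trailing = u;
      }
    }
    return trailing;
  }

  int main() {
    // IR shape of a volatile store: MemBarRelease (leading); StoreX; MemBarVolatile (trailing).
    ToyBar leading  = { LeadingStore };
    ToyBar trailing = { TrailingStore };
    std::vector<ToyBar*> users_of_store;
    users_of_store.push_back(&trailing);  // the leading barrier precedes the store instead
    assert(find_trailing(users_of_store) == &trailing);
    (void)leading;
    return 0;
  }
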
*** 2778,2787 ****
--- 2655,2688 ----
      return false;
    }
    return true;
  }
  
+ MemBarNode* LoadStoreNode::trailing_membar() const {
+   MemBarNode* trailing = NULL;
+   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+     Node* u = fast_out(i);
+     if (u->is_MemBar()) {
+       if (u->as_MemBar()->trailing_load_store()) {
+         assert(u->Opcode() == Op_MemBarAcquire, "");
+         assert(trailing == NULL, "only one");
+         trailing = u->as_MemBar();
+ #ifdef ASSERT
+         Node* leading = trailing->leading_membar();
+         assert(support_IRIW_for_not_multiple_copy_atomic_cpu || leading->Opcode() == Op_MemBarRelease, "incorrect membar");
+         assert(leading->as_MemBar()->leading_load_store(), "incorrect membar pair");
+         assert(leading->as_MemBar()->trailing_membar() == trailing, "incorrect membar pair");
+ #endif
+       } else {
+         assert(u->as_MemBar()->standalone(), "wrong barrier kind");
+       }
+     }
+   }
+ 
+   return trailing;
+ }
+ 
  uint LoadStoreNode::size_of() const { return sizeof(*this); }
  
  //=============================================================================
  //----------------------------------LoadStoreConditionalNode--------------------
  LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) {
*** 3012,3022 ****
  }
  
  //=============================================================================
  MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
    : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
!     _adr_type(C->get_adr_type(alias_idx))
  {
    init_class_id(Class_MemBar);
    Node* top = C->top();
    init_req(TypeFunc::I_O,top);
    init_req(TypeFunc::FramePtr,top);
--- 2913,2926 ----
  }
  
  //=============================================================================
  MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
    : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
!     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
! #ifdef ASSERT
!   , _pair_idx(0)
! #endif
  {
    init_class_id(Class_MemBar);
    Node* top = C->top();
    init_req(TypeFunc::I_O,top);
    init_req(TypeFunc::FramePtr,top);
*** 3046,3055 ****
--- 2950,2974 ----
    case Op_MemBarStoreStore: return new(C) MemBarStoreStoreNode(C, atp, pn);
    default: ShouldNotReachHere(); return NULL;
    }
  }
  
+ void MemBarNode::remove(PhaseIterGVN *igvn) {
+   if (outcnt() != 2) {
+     return;
+   }
+   if (trailing_store() || trailing_load_store()) {
+     MemBarNode* leading = leading_membar();
+     if (leading != NULL) {
+       assert(leading->trailing_membar() == this, "inconsistent leading/trailing membars");
+       leading->remove(igvn);
+     }
+   }
+   igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
+   igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
+ }
+ 
  //------------------------------Ideal------------------------------------------
  // Return a node which is more "ideal" than the current node.  Strip out
  // control copies
  Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    if (remove_dead_region(phase, can_reshape)) return this;
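
MemBarNode::remove() above tears a barrier pair down as a unit: when a trailing membar is removed, its recorded leading partner is removed first, and each barrier's Memory and Control projections are rewired to the barrier's inputs. The standalone sketch below models only the cascading part; it is not HotSpot code, and PairedBar/remove_pair are invented names.

  // Toy model: removing a trailing barrier also removes its leading partner.
  #include <cassert>

  struct PairedBar {
    PairedBar* leading;  // null for a leading or standalone barrier
    bool removed;
  };

  static void remove_pair(PairedBar* bar) {
    if (bar->leading != nullptr) {
      remove_pair(bar->leading);  // drop the leading half first, as the patch does
    }
    bar->removed = true;          // stands in for rewiring the Memory/Control projections
  }

  int main() {
    PairedBar leading  = { nullptr, false };
    PairedBar trailing = { &leading, false };
    remove_pair(&trailing);
    assert(leading.removed && trailing.removed);
    return 0;
  }
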
*** 3092,3111 ****
        }
      } else if (opc == Op_MemBarRelease) {
        // Final field stores.
        Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
        if ((alloc != NULL) && alloc->is_Allocate() &&
!           alloc->as_Allocate()->_is_non_escaping) {
          // The allocated object does not escape.
          eliminate = true;
        }
      }
      if (eliminate) {
        // Replace MemBar projections by its inputs.
        PhaseIterGVN* igvn = phase->is_IterGVN();
!       igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
!       igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
        // Must return either the original node (now dead) or a new node
        // (Do not return a top here, since that would break the uniqueness of top.)
        return new (phase->C) ConINode(TypeInt::ZERO);
      }
    }
--- 3011,3031 ----
        }
      } else if (opc == Op_MemBarRelease) {
        // Final field stores.
        Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
        if ((alloc != NULL) && alloc->is_Allocate() &&
!           AARCH64_ONLY ( alloc->as_Allocate()->does_not_escape_thread() )
!           NOT_AARCH64  ( alloc->as_Allocate()->_is_non_escaping )
!           ) {
          // The allocated object does not escape.
          eliminate = true;
        }
      }
      if (eliminate) {
        // Replace MemBar projections by its inputs.
        PhaseIterGVN* igvn = phase->is_IterGVN();
!       remove(igvn);
        // Must return either the original node (now dead) or a new node
        // (Do not return a top here, since that would break the uniqueness of top.)
        return new (phase->C) ConINode(TypeInt::ZERO);
      }
    }
*** 3130,3139 ****
--- 3050,3190 ----
    }
    ShouldNotReachHere();
    return NULL;
  }
  
+ void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
+   trailing->_kind = TrailingStore;
+   leading->_kind = LeadingStore;
+ #ifdef ASSERT
+   trailing->_pair_idx = leading->_idx;
+   leading->_pair_idx = leading->_idx;
+ #endif
+ }
+ 
+ void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
+   trailing->_kind = TrailingLoadStore;
+   leading->_kind = LeadingLoadStore;
+ #ifdef ASSERT
+   trailing->_pair_idx = leading->_idx;
+   leading->_pair_idx = leading->_idx;
+ #endif
+ }
+ 
+ MemBarNode* MemBarNode::trailing_membar() const {
+   ResourceMark rm;
+   Node* trailing = (Node*)this;
+   VectorSet seen(Thread::current()->resource_area());
+ 
+   Node_Stack multis(0);
+   do {
+     Node* c = trailing;
+     uint i = 0;
+     do {
+       trailing = NULL;
+       for (; i < c->outcnt(); i++) {
+         Node* next = c->raw_out(i);
+         if (next != c && next->is_CFG()) {
+           if (c->is_MultiBranch()) {
+             if (multis.node() == c) {
+               multis.set_index(i+1);
+             } else {
+               multis.push(c, i+1);
+             }
+           }
+           trailing = next;
+           break;
+         }
+       }
+       if (trailing != NULL && !seen.test_set(trailing->_idx)) {
+         break;
+       }
+       while (multis.size() > 0) {
+         c = multis.node();
+         i = multis.index();
+         if (i < c->req()) {
+           break;
+         }
+         multis.pop();
+       }
+     } while (multis.size() > 0);
+   } while (!trailing->is_MemBar() || !trailing->as_MemBar()->trailing());
+ 
+   MemBarNode* mb = trailing->as_MemBar();
+   assert((mb->_kind == TrailingStore && _kind == LeadingStore) ||
+          (mb->_kind == TrailingLoadStore && _kind == LeadingLoadStore), "bad trailing membar");
+   assert(mb->_pair_idx == _pair_idx, "bad trailing membar");
+   return mb;
+ }
+ 
+ MemBarNode* MemBarNode::leading_membar() const {
+   ResourceMark rm;
+   VectorSet seen(Thread::current()->resource_area());
+   Node_Stack regions(0);
+   Node* leading = in(0);
+   while (leading != NULL && (!leading->is_MemBar() || !leading->as_MemBar()->leading())) {
+     while (leading == NULL || leading->is_top() || seen.test_set(leading->_idx)) {
+       leading = NULL;
+       while (regions.size() > 0 && leading == NULL) {
+         Node* r = regions.node();
+         uint i = regions.index();
+         if (i < r->req()) {
+           leading = r->in(i);
+           regions.set_index(i+1);
+         } else {
+           regions.pop();
+         }
+       }
+       if (leading == NULL) {
+         assert(regions.size() == 0, "all paths should have been tried");
+         return NULL;
+       }
+     }
+     if (leading->is_Region()) {
+       regions.push(leading, 2);
+       leading = leading->in(1);
+     } else {
+       leading = leading->in(0);
+     }
+   }
+ #ifdef ASSERT
+   Unique_Node_List wq;
+   wq.push((Node*)this);
+   uint found = 0;
+   for (uint i = 0; i < wq.size(); i++) {
+     Node* n = wq.at(i);
+     if (n->is_Region()) {
+       for (uint j = 1; j < n->req(); j++) {
+         Node* in = n->in(j);
+         if (in != NULL && !in->is_top()) {
+           wq.push(in);
+         }
+       }
+     } else {
+       if (n->is_MemBar() && n->as_MemBar()->leading()) {
+         assert(n == leading, "consistency check failed");
+         found++;
+       } else {
+         Node* in = n->in(0);
+         if (in != NULL && !in->is_top()) {
+           wq.push(in);
+         }
+       }
+     }
+   }
+   assert(found == 1 || (found == 0 && leading == NULL), "consistency check failed");
+ #endif
+   if (leading == NULL) {
+     return NULL;
+   }
+   MemBarNode* mb = leading->as_MemBar();
+   assert((mb->_kind == LeadingStore && _kind == TrailingStore) ||
+          (mb->_kind == LeadingLoadStore && _kind == TrailingLoadStore), "bad leading membar");
+   assert(mb->_pair_idx == _pair_idx, "bad leading membar");
+   return mb;
+ }
+ 
  //===========================InitializeNode====================================
  // SUMMARY:
  // This node acts as a memory barrier on raw memory, after some raw stores.
  // The 'cooked' oop value feeds from the Initialize, not the Allocation.
  // The Initialize can 'capture' suitably constrained stores as raw inits.