
src/share/vm/opto/library_call.cpp


*** 40,49 ****
--- 40,54 ----
  #include "opto/runtime.hpp"
  #include "opto/subnode.hpp"
  #include "prims/nativeLookup.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "utilities/macros.hpp"
+ #if INCLUDE_ALL_GCS
+ #include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
+ #include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+ #include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
+ #endif
  
  class LibraryIntrinsic : public InlineCallGenerator {
    // Extend the set of intrinsics known to the runtime:
   public:
   private:
*** 2428,2438 ****
    // is enabled, we need to log the value in the referent field in an SATB buffer.
    // This routine performs some compile time filters and generates suitable
    // runtime filters that guard the pre-barrier code.
    // Also add memory barrier for non volatile load from the referent field
    // to prevent commoning of loads across safepoint.
!   if (!UseG1GC && !need_mem_bar)
      return;
  
    // Some compile time checks.
  
    // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
--- 2433,2443 ----
    // is enabled, we need to log the value in the referent field in an SATB buffer.
    // This routine performs some compile time filters and generates suitable
    // runtime filters that guard the pre-barrier code.
    // Also add memory barrier for non volatile load from the referent field
    // to prevent commoning of loads across safepoint.
!   if (!(UseG1GC || UseShenandoahGC) && !need_mem_bar)
      return;
  
    // Some compile time checks.
  
    // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
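
Both G1 and Shenandoah are snapshot-at-the-beginning (SATB) concurrent markers, which is why the referent filter above is widened from UseG1GC to UseG1GC || UseShenandoahGC: a Reference.referent value read through Unsafe or reflection must be logged so the concurrent marker still treats it as live. The following standalone sketch illustrates that idea only; names such as concurrent_marking_active, satb_buffer and read_referent_with_pre_barrier are invented for illustration and are not HotSpot code.

#include <atomic>
#include <cstdio>
#include <vector>

// Invented stand-ins for collector state; illustration only, not HotSpot code.
static std::atomic<bool> concurrent_marking_active{false};
static thread_local std::vector<void*> satb_buffer;  // per-thread SATB log

// Conceptual pre-barrier applied after loading Reference.referent: if an
// SATB marking cycle is running, log the loaded referent so the marker
// keeps it alive even though the Reference object may be the only path to it.
inline void* read_referent_with_pre_barrier(void* const* referent_slot) {
  void* referent = *referent_slot;            // the actual load
  if (referent != nullptr &&
      concurrent_marking_active.load(std::memory_order_acquire)) {
    satb_buffer.push_back(referent);          // enqueue for the marker
  }
  return referent;
}

int main() {
  int obj = 0;
  void* slot = &obj;                          // pretend this is the referent field
  concurrent_marking_active.store(true);
  read_referent_with_pre_barrier(&slot);
  std::printf("SATB buffer holds %zu entries\n", satb_buffer.size());
  return 0;
}
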
*** 2685,2694 ****
--- 2690,2707 ----
    // the barriers get omitted and the unsafe reference begins to "pollute"
    // the alias analysis of the rest of the graph, either Compile::can_alias
    // or Compile::must_alias will throw a diagnostic assert.)
    bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
  
+ #if INCLUDE_ALL_GCS
+   // Work around JDK-8220714 bug. This is done for Shenandoah only, until
+   // the shared code fix is upstreamed and properly tested there.
+   if (UseShenandoahGC) {
+     need_mem_bar |= is_native_ptr;
+   }
+ #endif
+ 
    // If we are reading the value of the referent field of a Reference
    // object (either by using Unsafe directly or through reflection)
    // then, if G1 is enabled, we need to record the referent in an
    // SATB log buffer using the pre-barrier mechanism.
    // Also we need to add memory barrier to prevent commoning reads
*** 2710,2733 ****
    // Heap pointers get a null-check from the interpreter,
    // as a courtesy. However, this is not guaranteed by Unsafe,
    // and it is not possible to fully distinguish unintended nulls
    // from intended ones in this API.
  
    if (is_volatile) {
      // We need to emit leading and trailing CPU membars (see below) in
      // addition to memory membars when is_volatile. This is a little
      // too strong, but avoids the need to insert per-alias-type
      // volatile membars (for stores; compare Parse::do_put_xxx), which
      // we cannot do effectively here because we probably only have a
      // rough approximation of type.
      need_mem_bar = true;
      // For Stores, place a memory ordering barrier now.
      if (is_store) {
!       insert_mem_bar(Op_MemBarRelease);
      } else {
        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
!         insert_mem_bar(Op_MemBarVolatile);
        }
      }
    }
  
    // Memory barrier to prevent normal and 'unsafe' accesses from
--- 2723,2749 ----
    // Heap pointers get a null-check from the interpreter,
    // as a courtesy. However, this is not guaranteed by Unsafe,
    // and it is not possible to fully distinguish unintended nulls
    // from intended ones in this API.
  
+   Node* load = NULL;
+   Node* store = NULL;
+   Node* leading_membar = NULL;
    if (is_volatile) {
      // We need to emit leading and trailing CPU membars (see below) in
      // addition to memory membars when is_volatile. This is a little
      // too strong, but avoids the need to insert per-alias-type
      // volatile membars (for stores; compare Parse::do_put_xxx), which
      // we cannot do effectively here because we probably only have a
      // rough approximation of type.
      need_mem_bar = true;
      // For Stores, place a memory ordering barrier now.
      if (is_store) {
!       leading_membar = insert_mem_bar(Op_MemBarRelease);
      } else {
        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
!         leading_membar = insert_mem_bar(Op_MemBarVolatile);
        }
      }
    }
  
    // Memory barrier to prevent normal and 'unsafe' accesses from
*** 2740,2750 ****
    if (!is_store) {
      MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
      // To be valid, unsafe loads may depend on other conditions than
      // the one that guards them: pin the Load node
!     Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
      // load value
      switch (type) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
--- 2756,2771 ----
    if (!is_store) {
      MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
      // To be valid, unsafe loads may depend on other conditions than
      // the one that guards them: pin the Load node
!     load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
+ #if INCLUDE_ALL_GCS
+     if (UseShenandoahGC && (type == T_OBJECT || type == T_ARRAY)) {
+       load = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load);
+     }
+ #endif
      // load value
      switch (type) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
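
The load_reference_barrier call added here is Shenandoah's way of making sure a just-loaded oop refers to the current copy of an object that may have been evacuated concurrently. As a rough standalone illustration only: the Obj layout, in_collection_set placeholder and the explicit forwardee field below are invented for the sketch and do not match the real object layout or barrier implementation.

#include <cstdio>

// Invented object layout for illustration; not the real Shenandoah scheme.
struct Obj {
  Obj* forwardee;   // points at the evacuated copy, or at the object itself
  int  payload;
};

// Placeholder: a real collector consults a region / collection-set table.
static bool in_collection_set(const Obj* o) { return o != nullptr; }

// Conceptual load-reference barrier: after loading a reference, return the
// to-space copy if the object has already been evacuated.
static Obj* load_reference_barrier(Obj* obj) {
  if (obj != nullptr && in_collection_set(obj)) {
    return obj->forwardee;
  }
  return obj;
}

int main() {
  Obj to_space   = { &to_space, 42 };      // evacuated copy, forwarded to itself
  Obj from_space = { &to_space, 0 };       // stale copy, forwarded to to_space
  Obj* seen = load_reference_barrier(&from_space);
  std::printf("payload via barrier: %d\n", seen->payload);  // prints 42
  return 0;
}
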
*** 2754,2780 ****
      case T_FLOAT:
      case T_DOUBLE:
        break;
      case T_OBJECT:
        if (need_read_barrier) {
!         insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
        }
        break;
      case T_ADDRESS:
        // Cast to an int type.
!       p = _gvn.transform(new (C) CastP2XNode(NULL, p));
!       p = ConvX2UL(p);
        break;
      default:
        fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
        break;
      }
      // The load node has the control of the preceding MemBarCPUOrder. All
      // following nodes will have the control of the MemBarCPUOrder inserted at
      // the end of this method. So, pushing the load onto the stack at a later
      // point is fine.
!     set_result(p);
    } else {
      // place effect of store into memory
      switch (type) {
      case T_DOUBLE:
        val = dstore_rounding(val);
--- 2775,2801 ----
      case T_FLOAT:
      case T_DOUBLE:
        break;
      case T_OBJECT:
        if (need_read_barrier) {
!         insert_pre_barrier(heap_base_oop, offset, load, !(is_volatile || need_mem_bar));
        }
        break;
      case T_ADDRESS:
        // Cast to an int type.
!       load = _gvn.transform(new (C) CastP2XNode(NULL, load));
!       load = ConvX2UL(load);
        break;
      default:
        fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
        break;
      }
      // The load node has the control of the preceding MemBarCPUOrder. All
      // following nodes will have the control of the MemBarCPUOrder inserted at
      // the end of this method. So, pushing the load onto the stack at a later
      // point is fine.
!     set_result(load);
    } else {
      // place effect of store into memory
      switch (type) {
      case T_DOUBLE:
        val = dstore_rounding(val);
*** 2786,2807 ****
        break;
      }
  
      MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
      if (type == T_OBJECT ) {
!       (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
      } else {
!       (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
      }
    }
  
    if (is_volatile) {
      if (!is_store) {
!       insert_mem_bar(Op_MemBarAcquire);
      } else {
        if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
!         insert_mem_bar(Op_MemBarVolatile);
        }
      }
    }
  
    if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
--- 2807,2835 ----
        break;
      }
  
      MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
      if (type == T_OBJECT ) {
!       store = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
      } else {
!       store = store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
      }
    }
  
    if (is_volatile) {
      if (!is_store) {
! #if INCLUDE_ALL_GCS
!       if (UseShenandoahGC) {
!         load = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(load);
!       }
! #endif
!       Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
!       mb->as_MemBar()->set_trailing_load();
      } else {
        if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
!         Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
!         MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
        }
      }
    }
  
    if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
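
The leading/trailing membar pairing that these hunks introduce (MemBarRelease before a volatile store, MemBarAcquire after a volatile load, now explicitly paired via set_store_pair and set_trailing_load) enforces the usual release/acquire discipline for volatile Unsafe accesses. As a plain C++ analogy only, using std::atomic rather than HotSpot IR, the ordering it guarantees looks roughly like this:

#include <atomic>
#include <cassert>
#include <thread>

// Plain C++ analogy (not HotSpot IR): the leading MemBarRelease before a
// volatile store and the trailing MemBarAcquire after a volatile load give
// roughly the release/acquire pairing shown here.
static int payload = 0;
static std::atomic<bool> ready{false};

static void writer() {
  payload = 42;                                  // ordinary store
  ready.store(true, std::memory_order_release);  // "volatile" store: release
}

static void reader() {
  while (!ready.load(std::memory_order_acquire)) { }  // "volatile" load: acquire
  assert(payload == 42);  // made visible by the release/acquire pair
}

int main() {
  std::thread t1(writer);
  std::thread t2(reader);
  t1.join();
  t2.join();
  return 0;
}
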
*** 2997,3007 ****
    // Memory-model-wise, a LoadStore acts like a little synchronized
    // block, so needs barriers on each side. These don't translate
    // into actual barriers on most machines, but we still need rest of
    // compiler to respect ordering.
  
!   insert_mem_bar(Op_MemBarRelease);
    insert_mem_bar(Op_MemBarCPUOrder);
  
    // 4984716: MemBars must be inserted before this
    // memory node in order to avoid a false
    // dependency which will confuse the scheduler.
--- 3025,3035 ----
    // Memory-model-wise, a LoadStore acts like a little synchronized
    // block, so needs barriers on each side. These don't translate
    // into actual barriers on most machines, but we still need rest of
    // compiler to respect ordering.
  
!   Node* leading_membar = insert_mem_bar(Op_MemBarRelease);
    insert_mem_bar(Op_MemBarCPUOrder);
  
    // 4984716: MemBars must be inserted before this
    // memory node in order to avoid a false
    // dependency which will confuse the scheduler.
*** 3096,3111 ****
--- 3124,3146 ----
    // main role is to prevent LoadStore nodes from being optimized away
    // when their results aren't used.
    Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
    set_memory(proj, alias_idx);
  
+   Node* access = load_store;
+ 
    if (type == T_OBJECT && kind == LS_xchg) {
  #ifdef _LP64
      if (adr->bottom_type()->is_ptr_to_narrowoop()) {
        load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
      }
  #endif
+ #if INCLUDE_ALL_GCS
+     if (UseShenandoahGC) {
+       load_store = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load_store);
+     }
+ #endif
      if (can_move_pre_barrier()) {
        // Don't need to load pre_val. The old value is returned by load_store.
        // The pre_barrier can execute after the xchg as long as no safepoint
        // gets inserted between them.
        pre_barrier(false /* do_load */,
*** 3115,3125 ****
      }
    }
  
    // Add the trailing membar surrounding the access
    insert_mem_bar(Op_MemBarCPUOrder);
!   insert_mem_bar(Op_MemBarAcquire);
  
    assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
    set_result(load_store);
    return true;
  }
--- 3150,3161 ----
      }
    }
  
    // Add the trailing membar surrounding the access
    insert_mem_bar(Op_MemBarCPUOrder);
!   Node* mb = insert_mem_bar(Op_MemBarAcquire, access);
!   MemBarNode::set_load_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
  
    assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
    set_result(load_store);
    return true;
  }
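
As the earlier comment puts it, the MemBarRelease ... LoadStore ... MemBarAcquire bracket around compareAndSwap/getAndSet makes the atomic read-modify-write act like a little synchronized block; set_load_store_pair now ties the two membars together explicitly. In plain C++ terms this corresponds roughly to an acquire-release read-modify-write; the sketch below is an std::atomic analogy only, not HotSpot code.

#include <atomic>
#include <cstdio>

// Plain C++ analogy: an acq_rel read-modify-write behaves like the
// release/acquire-bracketed LoadStore node that C2 emits for getAndAdd.
static std::atomic<int> counter{0};

static int get_and_add_acq_rel(int delta) {
  // fetch_add with acq_rel orders the RMW like a tiny synchronized block:
  // earlier writes are published before it, and reads after it observe
  // data published by other threads' releases.
  return counter.fetch_add(delta, std::memory_order_acq_rel);
}

int main() {
  int old_value = get_and_add_acq_rel(1);
  std::printf("old=%d new=%d\n", old_value, counter.load());
  return 0;
}
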
*** 4536,4545 ****
--- 4572,4595 ----
    // Compute the length also, if needed:
    Node* countx = size;
    countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
    countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
  
+ #if INCLUDE_ALL_GCS
+   if (UseShenandoahGC && ShenandoahCloneBarrier) {
+     assert (src->is_AddP(), "for clone the src should be the interior ptr");
+     assert (dest->is_AddP(), "for clone the dst should be the interior ptr");
+ 
+     // Make sure that references in the cloned object are updated for Shenandoah.
+     make_runtime_call(RC_LEAF|RC_NO_FP,
+                       OptoRuntime::shenandoah_clone_barrier_Type(),
+                       CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
+                       "shenandoah_clone_barrier", TypePtr::BOTTOM,
+                       src->in(AddPNode::Base));
+   }
+ #endif
+ 
    const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
    bool disjoint_bases = true;
    generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
                                 src, NULL, dest, NULL, countx,
                                 /*dest_uninitialized*/true);
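
ShenandoahRuntime::shenandoah_clone_barrier is invoked around the raw long-word copy so that reference fields inside the freshly cloned object do not keep stale pointers. Conceptually, a clone barrier is a post-copy fix-up pass over the clone's reference fields; the standalone sketch below uses invented names (FieldMap, resolve_forwarding, clone_with_barrier) and is not the actual runtime code.

#include <cstddef>
#include <cstring>

// Invented descriptor of where the reference fields live in an object.
struct FieldMap {
  const size_t* ref_offsets;
  size_t        count;
};

// Placeholder: a real collector would return the up-to-date (to-space) copy.
static void* resolve_forwarding(void* ref) { return ref; }

// Conceptual clone barrier: copy the payload word-for-word, then revisit
// every reference field of the clone and resolve it through the collector.
static void clone_with_barrier(void* dst, const void* src, size_t size_in_bytes,
                               const FieldMap& fields) {
  std::memcpy(dst, src, size_in_bytes);          // the raw clone copy
  for (size_t i = 0; i < fields.count; i++) {    // post-copy fix-up pass
    void** slot = reinterpret_cast<void**>(
        static_cast<char*>(dst) + fields.ref_offsets[i]);
    *slot = resolve_forwarding(*slot);           // update each reference field
  }
}

int main() {
  struct Pair { void* a; void* b; } src = { &src, &src }, dst = { nullptr, nullptr };
  const size_t offsets[] = { offsetof(Pair, a), offsetof(Pair, b) };
  FieldMap fields = { offsets, 2 };
  clone_with_barrier(&dst, &src, sizeof(Pair), fields);
  return 0;
}
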
*** 5272,5282 ****
      checked_value = cv;
    }
    // At this point we know we do not need type checks on oop stores.
  
    // Let's see if we need card marks:
!   if (alloc != NULL && use_ReduceInitialCardMarks()) {
      // If we do not need card marks, copy using the jint or jlong stub.
      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type), "sizes agree");
    }
--- 5322,5332 ----
      checked_value = cv;
    }
    // At this point we know we do not need type checks on oop stores.
  
    // Let's see if we need card marks:
!   if (alloc != NULL && use_ReduceInitialCardMarks() && ! UseShenandoahGC) {
      // If we do not need card marks, copy using the jint or jlong stub.
      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type), "sizes agree");
    }
*** 6306,6315 ****
--- 6356,6371 ----
    const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
  
    Node* no_ctrl = NULL;
    Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
  
+ #if INCLUDE_ALL_GCS
+   if (UseShenandoahGC) {
+     result = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, result);
+   }
+ #endif
+ 
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(false /* do_load */,
                control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                result /* pre_val */,
*** 6355,6376 ****
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    } else {
      type = Type::get_const_basic_type(bt);
    }
  
    if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
!     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
    }
    // Build the load.
    MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
    Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
  
    // If reference is volatile, prevent following memory ops from
    // floating up past the volatile read. Also prevents commoning
    // another volatile read.
    if (is_vol) {
      // Memory barrier includes bogus read of value to force load BEFORE membar
!     insert_mem_bar(Op_MemBarAcquire, loadedField);
    }
    return loadedField;
  }
--- 6411,6440 ----
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    } else {
      type = Type::get_const_basic_type(bt);
    }
  
+   Node* leading_membar = NULL;
    if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
!     leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
    }
    // Build the load.
    MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
    Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
  
+ #if INCLUDE_ALL_GCS
+   if (UseShenandoahGC && (bt == T_OBJECT || bt == T_ARRAY)) {
+     loadedField = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, loadedField);
+   }
+ #endif
+ 
    // If reference is volatile, prevent following memory ops from
    // floating up past the volatile read. Also prevents commoning
    // another volatile read.
    if (is_vol) {
      // Memory barrier includes bogus read of value to force load BEFORE membar
!     Node* mb = insert_mem_bar(Op_MemBarAcquire, loadedField);
!     mb->as_MemBar()->set_trailing_load();
    }
    return loadedField;
  }