src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
/*
* Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+ #include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
// Final sync IdealKit and GraphKit.
kit->final_sync(ideal);
}
+ Node* ShenandoahBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
+ BarrierSet* bs = BarrierSet::barrier_set();
+ ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
+ CardTable::CardValue* card_table_base = ctbs->card_table()->byte_map_base();
+ if (card_table_base != nullptr) {
+ return kit->makecon(TypeRawPtr::make((address)card_table_base));
+ } else {
+ return kit->null();
+ }
+ }
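+ // Note: byte_map_base() is the card table's biased base, i.e. it is chosen so that
+ // byte_map_base + (uintptr_t(addr) >> card_shift()) is the card entry covering addr,
+ // which is exactly the address arithmetic post_barrier() emits below.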
+
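+ // In pseudo-code, the barrier emitted below is roughly the following (illustrative
+ // sketch only; 0 is the dirty card value stored by the code below):
+ //
+ //   CardValue* card = byte_map_base + (uintptr_t(adr) >> card_shift());
+ //   if (!UseCondCardMark || *card != 0) {
+ //     *card = 0;
+ //   }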
+ void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,
+ Node* ctl,
+ Node* oop_store,
+ Node* obj,
+ Node* adr,
+ uint adr_idx,
+ Node* val,
+ BasicType bt,
+ bool use_precise) const {
+ assert(ShenandoahCardBarrier, "Should have been checked by caller");
+
+ // No store check needed if we're storing a null.
+ if (val != nullptr && val->is_Con()) {
+ // must be either an oop or NULL
+ const Type* t = val->bottom_type();
+ if (t == TypePtr::NULL_PTR || t == Type::TOP)
+ return;
+ }
+
+ if (ReduceInitialCardMarks && obj == kit->just_allocated_object(kit->control())) {
+ // We can skip marks on a freshly-allocated object in Eden.
+ // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
+ // That routine informs GC to take appropriate compensating steps,
+ // upon a slow-path allocation, so as to make this card-mark
+ // elision safe.
+ return;
+ }
+
+ if (!use_precise) {
+ // All card marks for a (non-array) instance are in one place:
+ adr = obj;
+ }
+ // (Else it's an array (or unknown), and we want more precise card marks.)
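+ // With imprecise marking we dirty the card covering the object header; with precise
+ // marking (arrays, unknown references) we dirty the card covering the stored-to address.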
+ assert(adr != nullptr, "");
+
+ IdealKit ideal(kit, true);
+
+ // Convert the pointer to an int prior to doing math on it
+ Node* cast = __ CastPX(__ ctrl(), adr);
+
+ // Divide by card size
+ Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift()) );
+
+ // Combine card table base and card offset
+ Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset );
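+ // E.g. with the default 512-byte cards (card_shift() == 9), a store to address A
+ // dirties the card byte at byte_map_base + (A >> 9).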
+
+ // Get the alias_index for raw card-mark memory
+ int adr_type = Compile::AliasIdxRaw;
+ Node* zero = __ ConI(0); // Dirty card value
+
+ if (UseCondCardMark) {
+ // The classic GC reference write barrier is typically implemented
+ // as a store into the global card mark table. Unfortunately
+ // unconditional stores can result in false sharing and excessive
+ // coherence traffic as well as false transactional aborts.
+ // UseCondCardMark enables MP "polite" conditional card mark
+ // stores. In theory we could relax the load from ctrl() to
+ // no_ctrl, but that doesn't buy much latitude.
+ Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
+ __ if_then(card_val, BoolTest::ne, zero);
+ }
+
+ // Smash zero into card
+ __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);
+
+ if (UseCondCardMark) {
+ __ end_if();
+ }
+
+ // Final sync IdealKit and GraphKit.
+ kit->final_sync(ideal);
+ }
+
#undef __
const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_Type() {
const Type **fields = TypeTuple::fields(2);
fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
uint adr_idx = kit->C->get_alias_index(adr_type);
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type());
+
+ Node* result = BarrierSetC2::store_at_resolved(access, val);
+
+ if (ShenandoahCardBarrier) {
+ const bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
+ const bool is_array = (decorators & IS_ARRAY) != 0;
+ const bool use_precise = is_array || anonymous;
+ post_barrier(kit, kit->control(), access.raw_access(), access.base(),
+ adr, adr_idx, val.node(), access.type(), use_precise);
+ }
+ return result;
+ } else {
+ assert(access.is_opt_access(), "only for optimization passes");
+ assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
+ return BarrierSetC2::store_at_resolved(access, val);
}
- return BarrierSetC2::store_at_resolved(access, val);
}
Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
// 1: non-reference load, no additional barrier is needed
if (!access.is_oop()) {
return load;
}
Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
- Node* new_val, const Type* value_type) const {
+ Node* new_val, const Type* value_type) const {
GraphKit* kit = access.kit();
if (access.is_oop()) {
shenandoah_write_barrier_pre(kit, false /* do_load */,
nullptr, nullptr, max_juint, nullptr, nullptr,
expected_val /* pre_val */, T_OBJECT);
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
}
#endif
load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators()));
+ if (ShenandoahCardBarrier) {
+ post_barrier(kit, kit->control(), access.raw_access(), access.base(),
+ access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
+ }
return load_store;
}
return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}
}
}
}
access.set_raw_access(load_store);
pin_atomic_op(access);
+ if (ShenandoahCardBarrier) {
+ post_barrier(kit, kit->control(), access.raw_access(), access.base(),
+ access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
+ }
return load_store;
}
return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}
if (access.is_oop()) {
result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, result, access.decorators()));
shenandoah_write_barrier_pre(kit, false /* do_load */,
nullptr, nullptr, max_juint, nullptr, nullptr,
result /* pre_val */, T_OBJECT);
+ if (ShenandoahCardBarrier) {
+ post_barrier(kit, kit->control(), access.raw_access(), access.base(),
+ access.addr().node(), access.alias_idx(), val, T_OBJECT, true);
+ }
}
return result;
}
if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
}
}
- void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
- if (is_shenandoah_wb_pre_call(n)) {
- shenandoah_eliminate_wb_pre(n, &macro->igvn());
+ void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
+ if (is_shenandoah_wb_pre_call(node)) {
+ shenandoah_eliminate_wb_pre(node, &macro->igvn());
+ }
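+ // Dismantle the card-mark subgraph emitted by post_barrier() for the allocation being
+ // eliminated: CastP2X -> URShiftX -> AddP -> {LoadB (UseCondCardMark only), StoreB}.
+ // The conditional load folds to a constant and each card-mark store is bypassed by
+ // rewiring its users to its memory input.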
+ if (ShenandoahCardBarrier && node->Opcode() == Op_CastP2X) {
+ Node* shift = node->unique_out();
+ Node* addp = shift->unique_out();
+ for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
+ Node* mem = addp->last_out(j);
+ if (UseCondCardMark && mem->is_Load()) {
+ assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
+ // The load is checking if the card has been written so
+ // replace it with zero to fold the test.
+ macro->replace_node(mem, macro->intcon(0));
+ continue;
+ }
+ assert(mem->is_Store(), "store required");
+ macro->replace_node(mem, mem->in(MemNode::Memory));
+ }
}
}
void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");