/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/c2/modRefBarrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"

Node* ModRefBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();

  bool is_array = (decorators & IS_ARRAY) != 0;
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool use_precise = is_array || anonymous;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;

  const InlineTypeNode* vt = nullptr;
  if (access.is_parse_access() && static_cast<C2ParseAccess&>(access).vt() != nullptr) {
    vt = static_cast<C2ParseAccess&>(access).vt();
  }

  if (vt == nullptr && (!access.is_oop() || tightly_coupled_alloc || (!in_heap && !anonymous))) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);

  Node* store = BarrierSetC2::store_at_resolved(access, val);

  // TODO 8350865
  // - We actually only need the post barrier once for non-arrays (same for C1, right)?
  // - Value is only needed to determine if we are storing null. Maybe we can go with a simple boolean?
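  // For a flat inline type store, emit a post barrier for each oop field of
  // the flat layout; primitive fields never need one. Field addresses are
  // computed relative to the payload start of the inline klass.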
  GraphKit* kit = parse_access.kit();
  if (vt != nullptr) {
    for (uint i = 0; i < vt->field_count(); ++i) {
      ciType* type = vt->field_type(i);
      if (!type->is_primitive_type()) {
        ciInlineKlass* vk = vt->bottom_type()->inline_klass();
        int field_offset = vt->field_offset(i) - vk->payload_offset();
        Node* value = vt->field_value(i);
        Node* field_adr = kit->basic_plus_adr(access.base(), adr, field_offset);
        post_barrier(kit, access.base(), field_adr, value, use_precise);
      }
    }
  } else {
    post_barrier(kit, access.base(), adr, val.node(), use_precise);
  }

  return store;
}

Node* ModRefBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                         Node* new_val, const Type* value_type) const {
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }

  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);

  // LS_cmp_exchange does not expose a success flag to branch on, so the post
  // barrier is emitted unconditionally (see atomic_cmpxchg_bool_at_resolved).
  post_barrier(access.kit(), access.base(), access.addr().node(), new_val, true);

  return result;
}

Node* ModRefBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                          Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();

  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  }

  Node* load_store = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);

  // Emit the post barrier only when the actual store happened. It only makes
  // sense to check this for LS_cmp_* nodes, which can fail to set the value.
  // LS_cmp_exchange does not produce any branches by default, so there is no
  // boolean result to piggyback on. TODO: When we merge CompareAndSwap with
  // CompareAndExchange and move branches here, it would make sense to conditionalize
  // post_barriers for LS_cmp_exchange as well.
  //
  // The CAS success path is marked more likely since we anticipate this is a
  // performance-critical path, while the CAS failure path can use the penalty
  // for going through the unlikely path as backoff. That is still better than
  // doing a store barrier there.
  IdealKit ideal(kit);
  ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
    kit->sync_kit(ideal);
    post_barrier(kit, access.base(), access.addr().node(), new_val, true);
    ideal.sync_kit(kit);
  } ideal.end_if();
  kit->final_sync(ideal);

  return load_store;
}

Node* ModRefBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
  if (!access.is_oop()) {
    return result;
  }

  // An exchange always stores new_val, so the post barrier is always required.
  post_barrier(access.kit(), access.base(), access.addr().node(), new_val, true);

  return result;
}