/*
 * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1OOPCLOSURES_INLINE_HPP
#define SHARE_GC_G1_G1OOPCLOSURES_INLINE_HPP

#include "gc/g1/g1OopClosures.hpp"

#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"

template <class T>
inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
  // We're not going to bother checking whether the object is already
  // forwarded or not, as doing so usually causes an immediate stall.
  // We'll try to prefetch the object (for write, given that we might
  // need to install the forwarding reference) and we'll get back to it
  // when we pop it from the queue.
  Prefetch::write(obj->mark_addr(), 0);
  Prefetch::read(obj->mark_addr(), (HeapWordSize*2));

  // Slightly paranoid test; we're trying to catch potential problems
  // before we go into push_on_queue, to know where the problem is
  // coming from.
  assert((obj == RawAccess<>::oop_load(p)) ||
         (obj->is_forwarded() &&
          obj->forwardee() == RawAccess<>::oop_load(p)),
         "p should still be pointing to obj or to its forwardee");

  _par_scan_state->push_on_queue(ScannerTask(p));
}
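
// Common handling for a reference to an object outside the collection set:
// keep humongous eager-reclaim candidates live, and remember references into
// optional regions so they can be processed if those regions are evacuated
// in a later, optional part of this collection.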
template <class T>
inline void G1ScanClosureBase::handle_non_cset_obj_common(G1HeapRegionAttr const region_attr, T* p, oop const obj) {
  if (region_attr.is_humongous_candidate()) {
    _g1h->set_humongous_is_live(obj);
  } else if (region_attr.is_optional()) {
    _par_scan_state->remember_reference_into_optional_region(p);
  }
}

inline void G1ScanClosureBase::trim_queue_partially() {
  _par_scan_state->trim_queue_partially();
}

// Applied to the fields of an object that has just been evacuated.
template <class T>
inline void G1ScanEvacuatedObjClosure::do_oop_work(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);

  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  if (region_attr.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else if (!HeapRegion::is_in_same_region(p, obj)) {
    handle_non_cset_obj_common(region_attr, p, obj);
    assert(_skip_card_enqueue != Uninitialized, "Scan location has not been initialized.");
    if (_skip_card_enqueue == True) {
      return;
    }
    _par_scan_state->enqueue_card_if_tracked(region_attr, p, obj);
  }
}

template <class T>
inline void G1CMOopClosure::do_oop_work(T* p) {
  _task->deal_with_reference(p);
}

template <class T>
inline void G1RootRegionScanClosure::do_oop_work(T* p) {
  T heap_oop = RawAccess<MO_RELAXED>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  _cm->mark_in_bitmap(_worker_id, obj);
}

template <class T>
inline static void check_obj_during_refinement(T* p, oop const obj) {
#ifdef ASSERT
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // Can't assert that obj is an oop here because of races with mutator threads:
  // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
  assert(is_object_aligned(obj), "obj must be aligned");
  assert(g1h->is_in(obj), "invariant");
  assert(g1h->is_in(p), "invariant");
#endif // ASSERT
}

template <class T>
inline void G1ConcurrentRefineOopClosure::do_oop_work(T* p) {
  T o = RawAccess<MO_RELAXED>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(o);

  check_obj_during_refinement(p, obj);

  if (HeapRegion::is_in_same_region(p, obj)) {
    // Normally this closure should only be called with cross-region references.
    // But since Java threads manipulate the references concurrently and we
    // reload the values, things may have changed.
    // Note that this check also lets references from a humongous continuation
    // region to its humongous start region slip through, as they are in
    // different regions, and adds a remembered set entry for them. This is
    // benign (apart from the memory usage), as we never try to either evacuate
    // or eagerly reclaim humongous arrays of j.l.Object.
    return;
  }

  HeapRegionRemSet* to_rem_set = _g1h->heap_region_containing(obj)->rem_set();

  assert(to_rem_set != NULL, "Need per-region 'into' remsets.");
  if (to_rem_set->is_tracked()) {
    to_rem_set->add_reference(p, _worker_id);
  }
}
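
// Scans a reference found while processing a card during the evacuation pause:
// references into the collection set are pushed onto the task queue and counted
// as found heap roots; other cross-region references may still need humongous,
// optional-region and card enqueue handling.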
template <class T>
inline void G1ScanCardClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(o);

  check_obj_during_refinement(p, obj);

  assert(!_g1h->is_in_cset((HeapWord*)p),
         "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.",
         p2i(p), _g1h->addr_to_region(p));

  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  if (region_attr.is_in_cset()) {
    // Since the source is always outside the collection set, we implicitly know
    // that this is a cross-region reference too.
    prefetch_and_push(p, obj);
    _heap_roots_found++;
  } else if (!HeapRegion::is_in_same_region(p, obj)) {
    handle_non_cset_obj_common(region_attr, p, obj);
    _par_scan_state->enqueue_card_if_tracked(region_attr, p, obj);
  }
}

template <class T>
inline void G1ScanRSForOptionalClosure::do_oop_work(T* p) {
  const G1HeapRegionAttr region_attr = _g1h->region_attr(p);
  // Entries in the optional collection set may come to originate from the
  // collection set after one or more increments: previously optional regions
  // have become actual collection set regions whose contents will be evacuated
  // anyway. Filter them out here.
  if (region_attr.is_in_cset()) {
    return;
  }
  _scan_cl->do_oop_work(p);
  _scan_cl->trim_queue_partially();
}

void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
  if (_g1h->heap_region_containing(new_obj)->is_young()) {
    _scanned_cld->record_modified_oops();
  }
}

void G1ParCopyHelper::mark_object(oop obj) {
  assert(!_g1h->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

  // We know that the object is not moving, so it's safe to read its size.
  _cm->mark_in_bitmap(_worker_id, obj);
}

void G1ParCopyHelper::trim_queue_partially() {
  _par_scan_state->trim_queue_partially();
}
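
// Copying closure applied to roots: if the referenced object is in the
// collection set, it is evacuated (or its existing forwardee reused) and *p is
// updated to point to the new location; otherwise only liveness bookkeeping is
// done. should_mark selects marking of objects outside the collection set in a
// concurrent mark start pause.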
template <G1Barrier barrier, bool should_mark>
template <class T>
void G1ParCopyClosure<barrier, should_mark>::do_oop_work(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);

  if (CompressedOops::is_null(heap_oop)) {
    return;
  }

  oop obj = CompressedOops::decode_not_null(heap_oop);

  assert(_worker_id == _par_scan_state->worker_id(), "sanity");

  const G1HeapRegionAttr state = _g1h->region_attr(obj);
  if (state.is_in_cset()) {
    oop forwardee;
    markWord m = obj->mark();
    if (m.is_marked()) {
      forwardee = cast_to_oop(m.decode_pointer());
    } else {
      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);

    if (barrier == G1BarrierCLD) {
      do_cld_barrier(forwardee);
    }
  } else {
    if (state.is_humongous_candidate()) {
      _g1h->set_humongous_is_live(obj);
    } else if ((barrier != G1BarrierNoOptRoots) && state.is_optional()) {
      _par_scan_state->remember_root_into_optional_region(p);
    }

    // The object is not in the collection set. should_mark is true iff the
    // current closure is applied on strong roots (and weak roots when class
    // unloading is disabled) in a concurrent mark start pause.
    if (should_mark) {
      mark_object(obj);
    }
  }
  trim_queue_partially();
}

// Used while rebuilding remembered sets after concurrent marking: adds an
// entry for every cross-region reference whose target region's remembered
// set is being tracked.
template <class T> void G1RebuildRemSetClosure::do_oop_work(T* p) {
  oop const obj = RawAccess<MO_RELAXED>::oop_load(p);
  if (obj == NULL) {
    return;
  }

  if (HeapRegion::is_in_same_region(p, obj)) {
    return;
  }

  HeapRegion* to = _g1h->heap_region_containing(obj);
  HeapRegionRemSet* rem_set = to->rem_set();
  if (rem_set->is_tracked()) {
    rem_set->add_reference(p, _worker_id);
  }
}

#endif // SHARE_GC_G1_G1OOPCLOSURES_INLINE_HPP