/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.inline.hpp"

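// Per-phase timing data for collection pauses, owned by the policy.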
G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

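// Returns the PLAB allocation statistics for evacuation into the given
// destination (survivor or old).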
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

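// Desired PLAB size, in HeapWords, for allocation into the given destination,
// derived from the statistics above and capped at the humongous object threshold.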
size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using a similar path as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm.next_region_in_humongous(hr);
}

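// Returns the index of the region containing the given address. The address
// must be within the reserved heap.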
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved().start()), p2i(reserved().end()));
  return (uint)(pointer_delta(addr, reserved().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

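// Returns the bottom address of the region with the given index.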
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

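// Returns the region containing the given address. The address must be within
// the reserved heap.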
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(reserved().start()), p2i(reserved().end()));
  return _hrm.addr_to_region((HeapWord*)(void*) addr);
}

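// As above, but returns NULL if the region containing the address is unmapped.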
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(reserved().start()), p2i(reserved().end()));
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// Dirties the cards that cover the block so that the post-write
// barrier never enqueues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

inline G1ScannerTasksQueueSet* G1CollectedHeap::task_queues() const {
  return _task_queues;
}

inline G1ScannerTasksQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

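// Returns whether the given object is marked in the bitmap of the marking
// currently in progress (the "next" bitmap).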
inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked(obj);
}

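// Collection set membership tests and region attribute lookups, all backed by
// the region attribute table.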
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset(cast_from_oop<HeapWord*>(obj));
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _region_attr.is_in_cset_or_humongous(cast_from_oop<HeapWord*>(obj));
}

G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
  return _region_attr.at((HeapWord*)addr);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}

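// The register_*_with_region_attr functions below update the region attribute
// table when a region takes on the corresponding role during a collection.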
void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

void G1CollectedHeap::register_new_survivor_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_new_survivor_region(r->hrm_index());
}

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
  _rem_set->exclude_region_from_scan(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

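// Object liveness with respect to the previous (completed) marking.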
inline bool G1CollectedHeap::is_obj_dead(const oop obj, const HeapRegion* hr) const {
  return hr->is_obj_dead(obj, _cm->prev_mark_bitmap());
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

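// An object is "ill" if it was allocated before the marking currently in
// progress started, has not been marked by that marking, and is not in a
// closed archive region.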
inline bool G1CollectedHeap::is_obj_ill(const oop obj, const HeapRegion* hr) const {
  return
    !hr->obj_allocated_since_next_marking(obj) &&
    !is_marked_next(obj) &&
    !hr->is_closed_archive();
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

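// Liveness check used by full collections: only the marking bitmap and the
// closed archive status of the region are consulted.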
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_closed_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

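// Eager-reclaim candidate state for humongous regions. The given region index
// must refer to the start region of a humongous object.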
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region(cast_from_oop<HeapWord*>(obj));
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the region attribute table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the region attribute table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _region_attr.clear_humongous(region);
  }
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP