< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp

Print this page

 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
 26 #define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
 27 
 28 #include "gc/g1/g1CollectedHeap.hpp"
 29 
 30 #include "gc/g1/g1BarrierSet.hpp"

 31 #include "gc/g1/g1CollectorState.hpp"
 32 #include "gc/g1/g1EvacFailureRegions.hpp"
 33 #include "gc/g1/g1Policy.hpp"
 34 #include "gc/g1/g1RemSet.hpp"
 35 #include "gc/g1/heapRegion.inline.hpp"
 36 #include "gc/g1/heapRegionManager.inline.hpp"
 37 #include "gc/g1/heapRegionRemSet.hpp"
 38 #include "gc/g1/heapRegionSet.inline.hpp"
 39 #include "gc/shared/markBitMap.inline.hpp"
 40 #include "gc/shared/taskqueue.inline.hpp"
 41 #include "runtime/atomic.hpp"
 42 #include "utilities/bitMap.inline.hpp"
 43 
 44 G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
 45   return _policy->phase_times();
 46 }
 47 
 48 G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
 49   switch (dest.type()) {
 50     case G1HeapRegionAttr::Young:

184   _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
185 }
186 
187 void G1CollectedHeap::register_new_survivor_region_with_region_attr(HeapRegion* r) {
188   _region_attr.set_new_survivor_region(r->hrm_index());
189 }
190 
191 void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
192   _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
193 }
194 
195 void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
196   _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
197   _rem_set->exclude_region_from_scan(r->hrm_index());
198 }
199 
200 void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
201   _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
202 }
203 
204 inline bool G1CollectedHeap::is_in_young(const oop obj) {

205   if (obj == NULL) {
206     return false;
207   }
208   return heap_region_containing(obj)->is_young();
209 }
210 
// An object is dead if its containing region considers it dead with
// respect to the bitmap of the previous (last completed) marking.
inline bool G1CollectedHeap::is_obj_dead(const oop obj, const HeapRegion* hr) const {
  return hr->is_obj_dead(obj, _cm->prev_mark_bitmap());
}
214 
215 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
216   if (obj == NULL) {
217     return false;
218   }
219   return is_obj_dead(obj, heap_region_containing(obj));
220 }
221 
222 inline bool G1CollectedHeap::is_obj_ill(const oop obj, const HeapRegion* hr) const {
223   return
224     !hr->obj_allocated_since_next_marking(obj) &&

252 }
253 
254 inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
255   uint region = addr_to_region(cast_from_oop<HeapWord*>(obj));
256   // Clear the flag in the humongous_reclaim_candidates table.  Also
257   // reset the entry in the region attribute table so that subsequent references
258   // to the same humongous object do not go into the slow path again.
259   // This is racy, as multiple threads may at the same time enter here, but this
260   // is benign.
261   // During collection we only ever clear the "candidate" flag, and only ever clear the
262   // entry in the in_cset_fast_table.
263   // We only ever evaluate the contents of these tables (in the VM thread) after
264   // having synchronized the worker threads with the VM thread, or in the same
265   // thread (i.e. within the VM thread).
266   if (is_humongous_reclaim_candidate(region)) {
267     set_humongous_reclaim_candidate(region, false);
268     _region_attr.clear_humongous(region);
269   }
270 }
271 





272 #endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
 26 #define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
 27 
 28 #include "gc/g1/g1CollectedHeap.hpp"
 29 
 30 #include "gc/g1/g1BarrierSet.hpp"
 31 #include "gc/g1/g1CardSetContainers.hpp"
 32 #include "gc/g1/g1CollectorState.hpp"
 33 #include "gc/g1/g1EvacFailureRegions.hpp"
 34 #include "gc/g1/g1Policy.hpp"
 35 #include "gc/g1/g1RemSet.hpp"
 36 #include "gc/g1/heapRegion.inline.hpp"
 37 #include "gc/g1/heapRegionManager.inline.hpp"
 38 #include "gc/g1/heapRegionRemSet.hpp"
 39 #include "gc/g1/heapRegionSet.inline.hpp"
 40 #include "gc/shared/markBitMap.inline.hpp"
 41 #include "gc/shared/taskqueue.inline.hpp"
 42 #include "runtime/atomic.hpp"
 43 #include "utilities/bitMap.inline.hpp"
 44 
 45 G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
 46   return _policy->phase_times();
 47 }
 48 
 49 G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
 50   switch (dest.type()) {
 51     case G1HeapRegionAttr::Young:

185   _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
186 }
187 
188 void G1CollectedHeap::register_new_survivor_region_with_region_attr(HeapRegion* r) {
189   _region_attr.set_new_survivor_region(r->hrm_index());
190 }
191 
192 void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
193   _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
194 }
195 
196 void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
197   _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
198   _rem_set->exclude_region_from_scan(r->hrm_index());
199 }
200 
201 void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
202   _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
203 }
204 
205 
206 inline bool G1CollectedHeap::is_in_young(const oop obj) const {
207   if (obj == NULL) {
208     return false;
209   }
210   return heap_region_containing(obj)->is_young();
211 }
212 
// An object is dead if its containing region considers it dead with
// respect to the bitmap of the previous (last completed) marking.
inline bool G1CollectedHeap::is_obj_dead(const oop obj, const HeapRegion* hr) const {
  return hr->is_obj_dead(obj, _cm->prev_mark_bitmap());
}
216 
217 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
218   if (obj == NULL) {
219     return false;
220   }
221   return is_obj_dead(obj, heap_region_containing(obj));
222 }
223 
224 inline bool G1CollectedHeap::is_obj_ill(const oop obj, const HeapRegion* hr) const {
225   return
226     !hr->obj_allocated_since_next_marking(obj) &&

254 }
255 
256 inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
257   uint region = addr_to_region(cast_from_oop<HeapWord*>(obj));
258   // Clear the flag in the humongous_reclaim_candidates table.  Also
259   // reset the entry in the region attribute table so that subsequent references
260   // to the same humongous object do not go into the slow path again.
261   // This is racy, as multiple threads may at the same time enter here, but this
262   // is benign.
263   // During collection we only ever clear the "candidate" flag, and only ever clear the
264   // entry in the in_cset_fast_table.
265   // We only ever evaluate the contents of these tables (in the VM thread) after
266   // having synchronized the worker threads with the VM thread, or in the same
267   // thread (i.e. within the VM thread).
268   if (is_humongous_reclaim_candidate(region)) {
269     set_humongous_reclaim_candidate(region, false);
270     _region_attr.clear_humongous(region);
271   }
272 }
273 
274 inline bool G1CollectedHeap::requires_barriers(oop obj) const {
275   assert (obj != NULL, "");
276   return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary NULL check
277 }
278 
279 #endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
< prev index next >