24
25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
26 #define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
27
28 #include "gc/g1/g1CollectedHeap.hpp"
29
30 #include "gc/g1/g1BarrierSet.hpp"
31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
32 #include "gc/g1/g1EvacFailureRegions.hpp"
33 #include "gc/g1/g1EvacStats.inline.hpp"
34 #include "gc/g1/g1HeapRegion.inline.hpp"
35 #include "gc/g1/g1HeapRegionManager.inline.hpp"
36 #include "gc/g1/g1HeapRegionRemSet.hpp"
37 #include "gc/g1/g1HeapRegionSet.inline.hpp"
38 #include "gc/g1/g1Policy.hpp"
39 #include "gc/g1/g1RegionPinCache.inline.hpp"
40 #include "gc/g1/g1RemSet.hpp"
41 #include "gc/shared/collectedHeap.inline.hpp"
42 #include "gc/shared/markBitMap.inline.hpp"
43 #include "gc/shared/taskqueue.inline.hpp"
44 #include "oops/stackChunkOop.hpp"
45 #include "runtime/threadSMR.inline.hpp"
46 #include "utilities/bitMap.inline.hpp"
47
48 inline bool G1STWIsAliveClosure::do_object_b(oop p) {
49 // An object is reachable if it is outside the collection set and not a
50 // humongous candidate, or is inside and copied.
51 return !_g1h->is_in_cset_or_humongous_candidate(p) || p->is_forwarded();
52 }
53
// Claims the next batch of up to _claim_step JavaThread* entries from the
// snapshotted thread list. On success, returns a pointer to the first
// claimed entry and sets count to the number of valid entries following it;
// returns nullptr (with count == 0) once the list is exhausted.
inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
  count = 0;
  // Fast path: once the list is fully claimed, skip the atomic add below.
  // A relaxed load is sufficient here — a stale value only costs us one
  // extra fetch_then_add, handled by the re-check after it.
  if (_cur_claim.load_relaxed() >= _list.length()) {
    return nullptr;
  }
  // Atomically reserve the next _claim_step slots; claim is our start index.
  uint claim = _cur_claim.fetch_then_add(_claim_step);
  // Another claimer may have raced past the end between the check and the add.
  if (claim >= _list.length()) {
    return nullptr;
  }
  // The final batch may be shorter than _claim_step.
  count = MIN2(_list.length() - claim, _claim_step);
  return _list.list()->threads() + claim;
}
66
67 inline void G1JavaThreadsListClaimer::apply(ThreadClosure* cl) {
68 JavaThread* const* list;
69 uint count;
70
71 while ((list = claim(count)) != nullptr) {
72 for (uint i = 0; i < count; i++) {
73 cl->do_thread(list[i]);
74 }
75 }
76 }
77
78 G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
79 return _policy->phase_times();
80 }
81
82 G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
83 switch (dest.type()) {
84 case G1HeapRegionAttr::Young:
85 return &_survivor_evac_stats;
86 case G1HeapRegionAttr::Old:
87 return &_old_evac_stats;
88 default:
89 ShouldNotReachHere();
90 return nullptr; // Keep some compilers happy
91 }
92 }
93
94 size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
95 size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_size(workers()->active_workers());
96 return clamp_plab_size(gclab_word_size);
97 }
|
24
25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
26 #define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
27
28 #include "gc/g1/g1CollectedHeap.hpp"
29
30 #include "gc/g1/g1BarrierSet.hpp"
31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
32 #include "gc/g1/g1EvacFailureRegions.hpp"
33 #include "gc/g1/g1EvacStats.inline.hpp"
34 #include "gc/g1/g1HeapRegion.inline.hpp"
35 #include "gc/g1/g1HeapRegionManager.inline.hpp"
36 #include "gc/g1/g1HeapRegionRemSet.hpp"
37 #include "gc/g1/g1HeapRegionSet.inline.hpp"
38 #include "gc/g1/g1Policy.hpp"
39 #include "gc/g1/g1RegionPinCache.inline.hpp"
40 #include "gc/g1/g1RemSet.hpp"
41 #include "gc/shared/collectedHeap.inline.hpp"
42 #include "gc/shared/markBitMap.inline.hpp"
43 #include "gc/shared/taskqueue.inline.hpp"
44 #include "oops/oop.inline.hpp"
45 #include "oops/stackChunkOop.hpp"
46 #include "runtime/threadSMR.inline.hpp"
47 #include "utilities/bitMap.inline.hpp"
48
// Liveness predicate used during stop-the-world reference processing.
inline bool G1STWIsAliveClosure::do_object_b(oop p) {
  // An object is reachable if it is outside the collection set and not a
  // humongous candidate, or is inside and copied.
  return !_g1h->is_in_cset_or_humongous_candidate(p) || p->is_forwarded();
}
54
55 inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
56 count = 0;
57 if (_cur_claim.load_relaxed() >= _list.length()) {
58 return nullptr;
59 }
60 uint claim = _cur_claim.fetch_then_add(_claim_step);
61 if (claim >= _list.length()) {
62 return nullptr;
63 }
64 count = MIN2(_list.length() - claim, _claim_step);
65 return _list.list()->threads() + claim;
66 }
67
// Applies the given closure to every JavaThread in the snapshotted list.
// Batches are handed out by claim(), whose atomic protocol gives each
// caller disjoint sets of threads.
inline void G1JavaThreadsListClaimer::apply(ThreadClosure* cl) {
  JavaThread* const* list;
  uint count;

  // claim() returns nullptr (and count == 0) when no threads remain.
  while ((list = claim(count)) != nullptr) {
    for (uint i = 0; i < count; i++) {
      cl->do_thread(list[i]);
    }
  }
}
78
79 bool G1CollectedHeap::can_be_marked_through_immediately(oop obj) const {
80 return obj->is_array() && !obj->is_array_with_oops();
81 }
82
// Accessor for the GC phase timing data owned by the current policy.
G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}
86
// Returns the PLAB allocation statistics matching the given evacuation
// destination. Only Young (survivor) and Old destinations carry stats;
// any other region type is a programming error.
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return nullptr; // Keep some compilers happy
  }
}
98
// Computes the desired PLAB word size for allocations into the given
// destination, derived from recent allocation stats and the active worker
// count, then clamped to the allowed PLAB size range.
size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_size(workers()->active_workers());
  return clamp_plab_size(gclab_word_size);
}
102 }
|