1 /*
2 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
27
28 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
29
30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
31 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
32 #include "runtime/atomic.hpp"
33
34 HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest::Type type) {
35 shenandoah_assert_heaplocked_or_safepoint();
36 assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size);
37
38 HeapWord* obj = top();
39 if (pointer_delta(end(), obj) >= size) {
40 make_regular_allocation();
41 adjust_alloc_metadata(type, size);
42
43 HeapWord* new_top = obj + size;
44 set_top(new_top);
45
46 assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
47 assert(is_object_aligned(obj), "obj is not aligned: " PTR_FORMAT, p2i(obj));
48
49 return obj;
50 } else {
51 return nullptr;
52 }
53 }
54
55 inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t size) {
56 switch (type) {
57 case ShenandoahAllocRequest::_alloc_shared:
58 case ShenandoahAllocRequest::_alloc_shared_gc:
59 // Counted implicitly by tlab/gclab allocs
60 break;
61 case ShenandoahAllocRequest::_alloc_tlab:
62 _tlab_allocs += size;
63 break;
64 case ShenandoahAllocRequest::_alloc_gclab:
65 _gclab_allocs += size;
66 break;
67 default:
68 ShouldNotReachHere();
69 }
70 }
71
// Account `s` words of live data coming from allocations. Unlike the GC
// variant below, this does not report progress to the pacer.
inline void ShenandoahHeapRegion::increase_live_data_alloc_words(size_t s) {
  internal_increase_live_data(s);
}
75
// Account `s` words of live data discovered by GC marking, and report the
// marked amount to the pacer when pacing is enabled.
inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) {
  internal_increase_live_data(s);
  if (ShenandoahPacing) {
    ShenandoahHeap::heap()->pacer()->report_mark(s);
  }
}
82
// Atomically bump the region's live-data counter by `s` words; relaxed
// memory order is used, so only the atomicity of the add is guaranteed.
inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
  size_t new_live_data = Atomic::add(&_live_data, s, memory_order_relaxed);
#ifdef ASSERT
  // Sanity check: live data can never exceed what has been allocated here.
  size_t live_bytes = new_live_data * HeapWordSize;
  size_t used_bytes = used();
  assert(live_bytes <= used_bytes,
         "can't have more live data than used: " SIZE_FORMAT ", " SIZE_FORMAT, live_bytes, used_bytes);
#endif
}
92
// Reset the live-data counter to zero (atomic store).
inline void ShenandoahHeapRegion::clear_live_data() {
  Atomic::store(&_live_data, (size_t)0);
}
96
// Current live data in heap words (atomic load; may race with markers).
inline size_t ShenandoahHeapRegion::get_live_data_words() const {
  return Atomic::load(&_live_data);
}
100
// Current live data converted to bytes.
inline size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}
104
// True iff any live data has been recorded for this region.
inline bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}
108
109 inline size_t ShenandoahHeapRegion::garbage() const {
110 assert(used() >= get_live_data_bytes(),
111 "Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT,
112 get_live_data_bytes(), used());
113
114 size_t result = used() - get_live_data_bytes();
115 return result;
116 }
117
// Read the update watermark. Load-acquire pairs with the release-store in
// set_update_watermark(); value is always within [bottom(), top()].
inline HeapWord* ShenandoahHeapRegion::get_update_watermark() const {
  HeapWord* watermark = Atomic::load_acquire(&_update_watermark);
  assert(bottom() <= watermark && watermark <= top(), "within bounds");
  return watermark;
}
123
// Publish a new update watermark. Release-store pairs with the load-acquire
// in get_update_watermark() so concurrent readers see a consistent value.
inline void ShenandoahHeapRegion::set_update_watermark(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  Atomic::release_store(&_update_watermark, w);
}
128
// Fast version that avoids synchronization, only to be used at safepoints,
// where no other threads can race with the plain (non-atomic) store.
inline void ShenandoahHeapRegion::set_update_watermark_at_safepoint(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at Shenandoah safepoint");
  _update_watermark = w;
}
135
136 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
|
1 /*
2 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
27 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
28
29 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
30 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
32 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
33 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
34 #include "runtime/atomic.hpp"
35
// Allocate `size` words starting at the next `alignment_in_bytes` boundary at
// or above top(), filling any gap below the boundary with a filler object.
// Only used for LAB allocations from OLD regions (PLABs). If the full request
// does not fit, the size is shrunk to the largest aligned amount that fits;
// returns nullptr when even req.min_size() cannot be satisfied.
HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocRequest &req, size_t alignment_in_bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(req.is_lab_alloc(), "allocate_aligned() only applies to LAB allocations");
  assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size);
  assert(is_old(), "aligned allocations are only taken from OLD regions to support PLABs");
  assert(is_aligned(alignment_in_bytes, HeapWordSize), "Expect heap word alignment");

  HeapWord* orig_top = top();
  size_t alignment_in_words = alignment_in_bytes / HeapWordSize;

  // Round top() up to the next alignment boundary; pad_words is the gap that
  // must be filled (with a filler object) before the aligned allocation starts.

  HeapWord* aligned_obj = (HeapWord*) align_up(orig_top, alignment_in_bytes);
  size_t pad_words = aligned_obj - orig_top;
  // A non-empty gap must be large enough to hold a filler object; if it is
  // too small for one, skip ahead to the following alignment boundary.
  if ((pad_words > 0) && (pad_words < ShenandoahHeap::min_fill_size())) {
    pad_words += alignment_in_words;
    aligned_obj += alignment_in_words;
  }

  // NOTE(review): if aligned_obj were to land beyond end(), pointer_delta()
  // would underflow; presumably callers guarantee enough headroom — confirm.
  if (pointer_delta(end(), aligned_obj) < size) {
    // Shrink size to fit within available space and align it
    size = pointer_delta(end(), aligned_obj);
    size = align_down(size, alignment_in_words);
  }

  // Both originally requested size and adjusted size must be properly aligned
  assert (is_aligned(size, alignment_in_words), "Size must be multiple of alignment constraint");
  if (size >= req.min_size()) {
    // Even if req.min_size() may not be a multiple of card size, we know that size is.
    if (pad_words > 0) {
      assert(pad_words >= ShenandoahHeap::min_fill_size(), "pad_words expanded above to meet size constraint");
      ShenandoahHeap::fill_with_object(orig_top, pad_words);
      // Register the filler with the remembered set so card scans can walk over it.
      ShenandoahGenerationalHeap::heap()->old_generation()->card_scan()->register_object(orig_top);
    }

    make_regular_allocation(req.affiliation());
    adjust_alloc_metadata(req.type(), size);

    HeapWord* new_top = aligned_obj + size;
    assert(new_top <= end(), "PLAB cannot span end of heap region");
    set_top(new_top);
    // We do not req.set_actual_size() here. The caller sets it.
    // The padding is wasted space as far as this request is concerned.
    req.set_waste(pad_words);
    assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
    assert(is_aligned(aligned_obj, alignment_in_bytes), "obj is not aligned: " PTR_FORMAT, p2i(aligned_obj));
    return aligned_obj;
  } else {
    // The aligned size that fits in this region is smaller than min_size, so don't align top and don't allocate. Return failure.
    return nullptr;
  }
}
88
89 HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocRequest& req) {
90 shenandoah_assert_heaplocked_or_safepoint();
91 assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size);
92
93 HeapWord* obj = top();
94 if (pointer_delta(end(), obj) >= size) {
95 make_regular_allocation(req.affiliation());
96 adjust_alloc_metadata(req.type(), size);
97
98 HeapWord* new_top = obj + size;
99 set_top(new_top);
100
101 assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
102 assert(is_object_aligned(obj), "obj is not aligned: " PTR_FORMAT, p2i(obj));
103
104 return obj;
105 } else {
106 return nullptr;
107 }
108 }
109
110 inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t size) {
111 switch (type) {
112 case ShenandoahAllocRequest::_alloc_shared:
113 case ShenandoahAllocRequest::_alloc_shared_gc:
114 // Counted implicitly by tlab/gclab allocs
115 break;
116 case ShenandoahAllocRequest::_alloc_tlab:
117 _tlab_allocs += size;
118 break;
119 case ShenandoahAllocRequest::_alloc_gclab:
120 _gclab_allocs += size;
121 break;
122 case ShenandoahAllocRequest::_alloc_plab:
123 _plab_allocs += size;
124 break;
125 default:
126 ShouldNotReachHere();
127 }
128 }
129
// Account `s` words of live data coming from allocations. Unlike the GC
// variant below, this does not report progress to the pacer.
inline void ShenandoahHeapRegion::increase_live_data_alloc_words(size_t s) {
  internal_increase_live_data(s);
}
133
// Account `s` words of live data discovered by GC marking, and report the
// marked amount to the pacer when pacing is enabled.
inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) {
  internal_increase_live_data(s);
  if (ShenandoahPacing) {
    ShenandoahHeap::heap()->pacer()->report_mark(s);
  }
}
140
141 inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
142 size_t new_live_data = Atomic::add(&_live_data, s, memory_order_relaxed);
143 }
144
// Reset the live-data counter to zero (atomic store).
inline void ShenandoahHeapRegion::clear_live_data() {
  Atomic::store(&_live_data, (size_t)0);
}
148
// Current live data in heap words (atomic load; may race with markers).
inline size_t ShenandoahHeapRegion::get_live_data_words() const {
  return Atomic::load(&_live_data);
}
152
// Current live data converted to bytes.
inline size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}
156
// True iff any live data has been recorded for this region.
inline bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}
160
161 inline size_t ShenandoahHeapRegion::garbage() const {
162 assert(used() >= get_live_data_bytes(),
163 "Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT,
164 get_live_data_bytes(), used());
165
166 size_t result = used() - get_live_data_bytes();
167 return result;
168 }
169
170 inline size_t ShenandoahHeapRegion::garbage_before_padded_for_promote() const {
171 assert(get_top_before_promote() != nullptr, "top before promote should not equal null");
172 size_t used_before_promote = byte_size(bottom(), get_top_before_promote());
173 assert(used_before_promote >= get_live_data_bytes(),
174 "Live Data must be a subset of used before promotion live: " SIZE_FORMAT " used: " SIZE_FORMAT,
175 get_live_data_bytes(), used_before_promote);
176 size_t result = used_before_promote - get_live_data_bytes();
177 return result;
178
179 }
180
// Read the update watermark. Load-acquire pairs with the release-store in
// set_update_watermark(); value is always within [bottom(), top()].
inline HeapWord* ShenandoahHeapRegion::get_update_watermark() const {
  HeapWord* watermark = Atomic::load_acquire(&_update_watermark);
  assert(bottom() <= watermark && watermark <= top(), "within bounds");
  return watermark;
}
186
// Publish a new update watermark. Release-store pairs with the load-acquire
// in get_update_watermark() so concurrent readers see a consistent value.
inline void ShenandoahHeapRegion::set_update_watermark(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  Atomic::release_store(&_update_watermark, w);
}
191
// Fast version that avoids synchronization, only to be used at safepoints,
// where no other threads can race with the plain (non-atomic) store.
inline void ShenandoahHeapRegion::set_update_watermark_at_safepoint(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at Shenandoah safepoint");
  _update_watermark = w;
}
198
// Generation affiliation (FREE/YOUNG/OLD) is tracked centrally by the heap;
// the region object merely queries it.
inline ShenandoahAffiliation ShenandoahHeapRegion::affiliation() const {
  return ShenandoahHeap::heap()->region_affiliation(this);
}
202
// Human-readable name of this region's affiliation, for logging/printing.
inline const char* ShenandoahHeapRegion::affiliation_name() const {
  return shenandoah_affiliation_name(affiliation());
}
206
// True iff this region currently belongs to the young generation.
inline bool ShenandoahHeapRegion::is_young() const {
  return affiliation() == YOUNG_GENERATION;
}
210
// True iff this region currently belongs to the old generation.
inline bool ShenandoahHeapRegion::is_old() const {
  return affiliation() == OLD_GENERATION;
}
214
// True iff this region is affiliated with some generation (i.e. not FREE).
inline bool ShenandoahHeapRegion::is_affiliated() const {
  return affiliation() != FREE;
}
218
// Snapshot the current top so it can be restored later; see
// restore_top_before_promote(). Presumably taken before promotion-related
// padding adjusts _top — confirm against callers.
inline void ShenandoahHeapRegion::save_top_before_promote() {
  _top_before_promoted = _top;
}
222
// Undo save_top_before_promote(): restore _top from the snapshot and clear
// the snapshot so stale values cannot be reused.
inline void ShenandoahHeapRegion::restore_top_before_promote() {
  _top = _top_before_promoted;
  _top_before_promoted = nullptr;
}
227
228
229 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
|