/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/heapRegion.hpp"

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"

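// Bump-pointer allocation of between min_word_size and desired_word_size words
// from the space between top() and end(). Returns the old top on success and
// reports the actually allocated size via *actual_size; returns NULL if even
// min_word_size does not fit. Not safe for concurrent callers; see
// par_allocate_impl() below for the lock-free variant.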
inline HeapWord* HeapRegion::allocate_impl(size_t min_word_size,
                                           size_t desired_word_size,
                                           size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}

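// Lock-free variant of allocate_impl(): several threads may race to bump top,
// so the update is performed with a CAS and retried until it either succeeds
// or the remaining space becomes too small to satisfy min_word_size.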
inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size,
                                               size_t desired_word_size,
                                               size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = Atomic::cmpxchg(&_top, obj, new_top);
      // result is one of two values:
      //   the old top value (obj): the exchange succeeded;
      //   otherwise: the top installed by another thread, so we retry.
      if (result == obj) {
        assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

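// Allocation wrappers that additionally record the newly allocated block in
// the region's block offset table part (_bot_part), keeping block_start()
// lookups correct for this region.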
inline HeapWord* HeapRegion::allocate(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    _bot_part.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* HeapRegion::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because of the requirement of keeping "_bot_part" up to date with the
// allocations, we sequentialize these with a lock. Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* HeapRegion::par_allocate(size_t min_word_size,
                                          size_t desired_word_size,
                                          size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

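// Block-start lookups: map an address within the region to the start of the
// block (object or filler) covering it, using the block offset table part.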
inline HeapWord* HeapRegion::block_start(const void* p) {
  return _bot_part.block_start(p);
}

inline HeapWord* HeapRegion::block_start_const(const void* p) const {
  return _bot_part.block_start_const(p);
}

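// Determine whether obj (which must lie below top()) is dead with respect to
// the given "previous" marking bitmap, and report the size of the block
// starting at obj: the object's own size if it is parsable, otherwise the
// distance to the next marked address when its class may have been unloaded.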
inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
  HeapWord* addr = cast_from_oop<HeapWord*>(obj);

  assert(addr < top(), "must be");
  assert(!is_closed_archive(),
         "Closed archive regions should not have references into other regions");
  assert(!is_humongous(), "Humongous objects not handled here");
  bool obj_is_dead = is_obj_dead(obj, prev_bitmap);

  if (ClassUnloading && obj_is_dead) {
    assert(!block_is_obj(addr), "must be");
    *size = block_size_using_bitmap(addr, prev_bitmap);
  } else {
    assert(block_is_obj(addr), "must be");
    *size = obj->size();
  }
  return obj_is_dead;
}

inline bool HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    assert(is_continues_humongous(), "This case can only happen for humongous regions");
    return (p == humongous_start_region()->bottom());
  }
  // When class unloading is enabled it is not safe to only consider top() to conclude if the
  // given pointer is a valid object. The situation can occur both for class unloading in a
  // Full GC and during a concurrent cycle.
  // During a Full GC regions can be excluded from compaction due to high live ratio, and
  // because of this there can be stale objects for unloaded classes left in these regions.
  // During a concurrent cycle class unloading is done after marking is complete and objects
  // for the unloaded classes will be stale until the regions are collected.
  if (ClassUnloading) {
    return !g1h->is_obj_dead(cast_to_oop(p), this);
  }
  return p < top();
}

inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMap* const prev_bitmap) const {
  assert(ClassUnloading,
         "All blocks should be objects if class unloading isn't used, so this method should not be called. "
         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
         "addr: " PTR_FORMAT,
         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));

  // Old regions' dead objects may have dead classes.
  // We need to find the next live object using the bitmap.
  HeapWord* next = prev_bitmap->get_next_marked_addr(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const {
  assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
  return !obj_allocated_since_prev_marking(obj) &&
         !prev_bitmap->is_marked(obj) &&
         !is_closed_archive();
}

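// Size of the block starting at addr: the remaining space if addr == top(),
// the object's size if addr is a parsable object, otherwise the distance to
// the next marked address. The RESOLVE template parameter controls whether a
// forwarded object is resolved through its forwardee before its size is read;
// the asserts below restrict that to UseCompactObjectHeaders outside a full GC.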
template <bool RESOLVE>
inline size_t HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    oop obj = cast_to_oop(addr);
#ifdef _LP64
#ifdef ASSERT
    if (RESOLVE) {
      assert(UseCompactObjectHeaders && !G1CollectedHeap::heap()->collector_state()->in_full_gc(), "Illegal/excessive resolve during full-GC");
    } else {
      assert(!UseCompactObjectHeaders || G1CollectedHeap::heap()->collector_state()->in_full_gc() || !obj->is_forwarded(), "Missing resolve when forwarded during normal GC");
    }
#endif
    if (RESOLVE && obj->is_forwarded()) {
      obj = obj->forwardee();
    }
#endif
    return obj->size();
  }

  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap());
}

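// Post-full-GC resets: restore top from the per-region compaction point and
// bring the region's marking-related metadata back to a consistent state.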
inline void HeapRegion::reset_compaction_top_after_compaction() {
  set_top(compaction_top());
  _compaction_top = bottom();
}

inline void HeapRegion::reset_compacted_after_full_gc() {
  assert(!is_pinned(), "must be");

  reset_compaction_top_after_compaction();
  // After a compaction the mark bitmap in non-pinned regions is invalid.
  // We treat all objects as being above PTAMS.
  zero_marked_bytes();
  init_top_at_mark_start();

  reset_after_full_gc_common();
}

inline void HeapRegion::reset_skip_compacting_after_full_gc() {
  assert(!is_free(), "must be");

  assert(compaction_top() == bottom(),
         "region %u compaction_top " PTR_FORMAT " must not be different from bottom " PTR_FORMAT,
         hrm_index(), p2i(compaction_top()), p2i(bottom()));

  _prev_top_at_mark_start = top(); // Keep existing top and usage.
  _prev_marked_bytes = used();
  _next_top_at_mark_start = bottom();
  _next_marked_bytes = 0;

  reset_after_full_gc_common();
}

inline void HeapRegion::reset_after_full_gc_common() {
  if (is_empty()) {
    reset_bot();
  }

  // Clear unused heap memory in debug builds.
  if (ZapUnusedHeapArea) {
    mangle_unused_area();
  }
}

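// Walk all objects marked in the given bitmap between bottom() and top() and
// apply the closure to each. The closure returns the size of the object it
// processed, which is used to advance to the next candidate address.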
template<typename ApplyToMarkedClosure>
inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
  HeapWord* limit = top();
  HeapWord* next_addr = bottom();

  while (next_addr < limit) {
    Prefetch::write(next_addr, PrefetchScanIntervalInBytes);
    // This explicit is_marked check is a way to avoid
    // some extra work done by get_next_marked_addr for
    // the case where next_addr is marked.
    if (bitmap->is_marked(next_addr)) {
      oop current = cast_to_oop(next_addr);
      next_addr += closure->apply(current);
    } else {
      next_addr = bitmap->get_next_marked_addr(next_addr, limit);
    }
  }

  assert(next_addr == limit, "Should stop the scan at the limit.");
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  size_t temp;
  return allocate_no_bot_updates(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

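// Concurrent-marking bookkeeping: at the start of marking remember the current
// top as the "next" top-at-mark-start (TAMS); at the end of marking promote
// the "next" values to "prev" and reset the "next" ones.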
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
  _gc_efficiency = -1.0;
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _next_top_at_mark_start = bottom();
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;
}

inline bool HeapRegion::in_collection_set() const {
  return G1CollectedHeap::heap()->is_in_cset(this);
}

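// Apply the closure to the oops of the single humongous object intersecting
// mr. Returns NULL if the object has not been fully published yet (only
// possible for concurrent callers on a stale card), otherwise the address up
// to which scanning can be considered complete.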
template <class Closure, bool is_gc_active>
HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
                                                        Closure* cl,
                                                        G1CollectedHeap* g1h) {
  assert(is_humongous(), "precondition");
  HeapRegion* sr = humongous_start_region();
  oop obj = cast_to_oop(sr->bottom());

  // If concurrent and klass_or_null is NULL, then space has been
  // allocated but the object has not yet been published by setting
  // the klass. That can only happen if the card is stale. However,
  // we've already set the card clean, so we must return failure,
  // since the allocating thread could have performed a write to the
  // card that might be missed otherwise.
  if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) {
    return NULL;
  }

  // We have a well-formed humongous object at the start of sr.
  // Only filler objects follow a humongous object in the containing
  // regions, and we can ignore those. So only process the one
  // humongous object.
  if (g1h->is_obj_dead(obj, sr)) {
    // The object is dead. There can be no other object in this region, so return
    // the end of that region.
    return end();
  }
  if (obj->is_objArray() || (sr->bottom() < mr.start())) {
    // objArrays are always marked precisely, so limit processing
    // with mr. Non-objArrays might be precisely marked, and since
    // it's humongous it's worthwhile avoiding full processing.
    // However, the card could be stale and only cover filler
    // objects. That should be rare, so not worth checking for;
    // instead let it fall out from the bounded iteration.
    obj->oop_iterate(cl, mr);
    return mr.end();
  } else {
    // If obj is not an objArray and mr contains the start of the
    // obj, then this could be an imprecise mark, and we need to
    // process the entire object.
    size_t size = obj->oop_iterate_size(cl);
    // We have scanned to the end of the object, but since there can be no objects
    // after this humongous object in the region, we can return the end of the
    // region if it is greater.
    return MAX2(cast_from_oop<HeapWord*>(obj) + size, mr.end());
  }
}

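// Iterate over the oops of all objects in this region that intersect mr
// (a card-sized memory region trimmed to the allocated part of the region).
// Dead objects are skipped using the previous marking bitmap. Returns the
// address up to which scanning was performed, or NULL if the humongous fast
// path had to give up on a not-yet-published object.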
template <bool is_gc_active, class Closure>
HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
                                                            Closure* cl) {
  assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Special handling for humongous regions.
  if (is_humongous()) {
    return do_oops_on_memregion_in_humongous<Closure, is_gc_active>(mr, cl, g1h);
  }
  assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str());

  // Because mr has been trimmed to what's been allocated in this
  // region, the parts of the heap that are examined here are always
  // parsable; there's no need to use klass_or_null to detect
  // in-progress allocation.

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // Find the obj that extends onto mr.start().
  // Update BOT as needed while finding start of (possibly dead)
  // object containing the start of the region.
  HeapWord* cur = block_start(start);

#ifdef ASSERT
  {
    assert(cur <= start,
           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
    HeapWord* next = cur + block_size(cur);
    assert(start < next,
           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
  }
#endif

  const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prev_mark_bitmap();
  while (true) {
    oop obj = cast_to_oop(cur);
    assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));
    assert(obj->klass_or_null() != NULL,
           "Unparsable heap at " PTR_FORMAT, p2i(cur));

    size_t size;
    bool is_dead = is_obj_dead_with_size(obj, bitmap, &size);
    bool is_precise = false;

    cur += size;
    if (!is_dead) {
      // Process live object's references.

      // Non-objArrays are usually marked imprecise at the object
      // start, in which case we need to iterate over them in full.
      // objArrays are precisely marked, but can still be iterated
      // over in full if completely covered.
      if (!obj->is_objArray() || (cast_from_oop<HeapWord*>(obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
        is_precise = true;
      }
    }
    if (cur >= end) {
      return is_precise ? end : cur;
    }
  }
}

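// Survivor-rate-group bookkeeping: young regions are registered with a
// G1SurvRateGroup at a given age, and the recorded surviving words feed the
// predictor used for survival-rate estimates.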
inline int HeapRegion::age_in_surv_rate_group() const {
  assert(has_surv_rate_group(), "pre-condition");
  assert(has_valid_age_in_surv_rate(), "pre-condition");
  return _surv_rate_group->age_in_group(_age_index);
}

inline bool HeapRegion::has_valid_age_in_surv_rate() const {
  return G1SurvRateGroup::is_valid_age_index(_age_index);
}

inline bool HeapRegion::has_surv_rate_group() const {
  return _surv_rate_group != NULL;
}

inline double HeapRegion::surv_rate_prediction(G1Predictions const& predictor) const {
  assert(has_surv_rate_group(), "pre-condition");
  return _surv_rate_group->surv_rate_pred(predictor, age_in_surv_rate_group());
}

inline void HeapRegion::install_surv_rate_group(G1SurvRateGroup* surv_rate_group) {
  assert(surv_rate_group != NULL, "pre-condition");
  assert(!has_surv_rate_group(), "pre-condition");
  assert(is_young(), "pre-condition");

  _surv_rate_group = surv_rate_group;
  _age_index = surv_rate_group->next_age_index();
}

inline void HeapRegion::uninstall_surv_rate_group() {
  if (has_surv_rate_group()) {
    assert(has_valid_age_in_surv_rate(), "pre-condition");
    assert(is_young(), "pre-condition");

    _surv_rate_group = NULL;
    _age_index = G1SurvRateGroup::InvalidAgeIndex;
  } else {
    assert(!has_valid_age_in_surv_rate(), "pre-condition");
  }
}

inline void HeapRegion::record_surv_words_in_group(size_t words_survived) {
  assert(has_surv_rate_group(), "pre-condition");
  assert(has_valid_age_in_surv_rate(), "pre-condition");
  int age_in_group = age_in_surv_rate_group();
  _surv_rate_group->record_surviving_words(age_in_group, words_survived);
}

#endif // SHARE_GC_G1_HEAPREGION_INLINE_HPP