1 /*
2 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/space.inline.hpp"
27 #include "gc/shared/tlab_globals.hpp"
28 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
30 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
31 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
32 #include "jfr/jfrEvents.hpp"
33 #include "memory/allocation.hpp"
34 #include "memory/iterator.inline.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "memory/universe.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "runtime/atomic.hpp"
39 #include "runtime/globals_extension.hpp"
40 #include "runtime/java.hpp"
41 #include "runtime/mutexLocker.hpp"
42 #include "runtime/os.hpp"
43 #include "runtime/safepoint.hpp"
44 #include "utilities/powerOfTwo.hpp"
45
// Region geometry and derived thresholds. These are computed once during
// heap initialization and remain constant afterwards; zero means
// "not yet initialized".
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
57
// Construct region metadata for the heap slice [start, start + RegionSizeWords).
// A new region starts empty; "committed" records whether its backing memory is
// already committed. In debug runs with ZapUnusedHeapArea, committed memory is
// mangled so use of stale data is detectable.
ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(NULL),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}
78
79 void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
80 ResourceMark rm;
81 stringStream ss;
82 ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
83 print_on(&ss);
84 fatal("%s", ss.as_string());
85 }
86
// Transition this region into the regular (mutator-allocatable) state.
// Caller must hold the heap lock. Empty regions are committed on demand;
// already-regular or pinned regions are left untouched.
void ShenandoahHeapRegion::make_regular_allocation() {
  shenandoah_assert_heaplocked();

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fallthrough.
    case _empty_committed:
      set_state(_regular);
      // Fallthrough.
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}
102
// Force this region into the regular state, bypassing normal transition
// rules. Only legal during full or degenerated GC, which may repurpose
// cset and humongous regions wholesale during compaction.
void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fallthrough.
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      // Stays pinned, but no longer belongs to the collection set.
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}
127
// Transition an empty region to the head region of a humongous object.
// Caller must hold the heap lock; commits the memory on demand.
void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fallthrough.
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}
140
// Force this region into the humongous-start state, bypassing normal
// transition rules. Only legal during full GC compaction, which may move
// humongous objects over arbitrary committed regions.
void ShenandoahHeapRegion::make_humongous_start_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}
156
// Transition an empty region to a continuation region of a humongous
// object (body after the head). Caller must hold the heap lock.
void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fallthrough.
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}
169
// Force this region into the humongous-continuation state, bypassing
// normal transition rules. Only legal during full GC compaction.
void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}
185
// Transition to a pinned state so the region cannot be moved or reclaimed
// while critical pins (e.g. JNI critical sections) are held. Requires at
// least one recorded pin; already-pinned regions are left untouched.
void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
      // Fallthrough.
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
      // Fallthrough.
    case _pinned_humongous_start:
      return;
    case _cset:
      // NOTE(review): direct assignment bypasses set_state() (and thus the
      // JFR state-change event emitted there) — presumably deliberate for
      // cset pinning; confirm before changing.
      _state = _pinned_cset;
      return;
    default:
      report_illegal_transition("pinning");
  }
}
207
// Drop the pinned state once all pins are released, restoring the state
// the region had before it was pinned. Requires zero recorded pins.
void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      // Not currently pinned; nothing to undo.
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}
229
// Mark this region as a member of the collection set (its live objects
// will be evacuated). No-op when already in the cset.
void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _regular:
      set_state(_cset);
      // Fallthrough.
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}
241
// Transition the region to trash: its contents are garbage and the space
// can be recycled. Legal from cset, humongous, and regular states.
void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}
258
// Trash a region that is known to contain no marked objects (immediate
// garbage), additionally updating the marking context so a bitmap reset
// for this region can be skipped.
void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // tell marking context about it to bypass bitmap resets.
  ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this);
}
266
// Recycle a trashed region into the empty-committed (free) state and
// timestamp it; the timestamp later drives uncommit heuristics.
void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}
278
279 void ShenandoahHeapRegion::make_uncommitted() {
280 shenandoah_assert_heaplocked();
281 switch (_state) {
282 case _empty_committed:
283 do_uncommit();
284 set_state(_empty_uncommitted);
285 return;
286 default:
287 report_illegal_transition("uncommiting");
288 }
289 }
290
// Force commit of an uncommitted empty region, bypassing normal
// transition rules. Only legal during full GC, which may need every
// region committed for sliding compaction.
void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}
304
305 void ShenandoahHeapRegion::reset_alloc_metadata() {
306 _tlab_allocs = 0;
307 _gclab_allocs = 0;
308 }
309
310 size_t ShenandoahHeapRegion::get_shared_allocs() const {
311 return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize;
312 }
313
314 size_t ShenandoahHeapRegion::get_tlab_allocs() const {
315 return _tlab_allocs * HeapWordSize;
316 }
317
318 size_t ShenandoahHeapRegion::get_gclab_allocs() const {
319 return _gclab_allocs * HeapWordSize;
320 }
321
// Set the amount of live data (argument in bytes; stored in words).
// Non-atomic store, hence restricted to the VM thread at a safepoint.
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}
326
327 void ShenandoahHeapRegion::print_on(outputStream* st) const {
328 st->print("|");
329 st->print(SIZE_FORMAT_W(5), this->_index);
330
331 switch (_state) {
332 case _empty_uncommitted:
333 st->print("|EU ");
334 break;
335 case _empty_committed:
336 st->print("|EC ");
337 break;
338 case _regular:
339 st->print("|R ");
340 break;
341 case _humongous_start:
345 st->print("|HP ");
346 break;
347 case _humongous_cont:
348 st->print("|HC ");
349 break;
350 case _cset:
351 st->print("|CS ");
352 break;
353 case _trash:
354 st->print("|T ");
355 break;
356 case _pinned:
357 st->print("|P ");
358 break;
359 case _pinned_cset:
360 st->print("|CSP");
361 break;
362 default:
363 ShouldNotReachHere();
364 }
365 st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
366 p2i(bottom()), p2i(top()), p2i(end()));
367 st->print("|TAMS " INTPTR_FORMAT_W(12),
368 p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
369 st->print("|UWM " INTPTR_FORMAT_W(12),
370 p2i(_update_watermark));
371 st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
372 st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs()));
373 st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs()));
374 st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs()));
375 st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
376 st->print("|CP " SIZE_FORMAT_W(3), pin_count());
377 st->cr();
378 }
379
380 void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) {
381 if (!is_active()) return;
382 if (is_humongous()) {
383 oop_iterate_humongous(blk);
384 } else {
385 oop_iterate_objects(blk);
386 }
387 }
388
389 void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) {
390 assert(! is_humongous(), "no humongous region here");
391 HeapWord* obj_addr = bottom();
392 HeapWord* t = top();
393 // Could call objects iterate, but this is easier.
394 while (obj_addr < t) {
395 oop obj = cast_to_oop(obj_addr);
396 obj_addr += obj->oop_iterate_size(blk);
397 }
398 }
399
400 void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
401 assert(is_humongous(), "only humongous region here");
402 // Find head.
403 ShenandoahHeapRegion* r = humongous_start_region();
404 assert(r->is_humongous_start(), "need humongous head here");
405 oop obj = cast_to_oop(r->bottom());
406 obj->oop_iterate(blk, MemRegion(bottom(), top()));
407 }
408
409 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
410 ShenandoahHeap* heap = ShenandoahHeap::heap();
411 assert(is_humongous(), "Must be a part of the humongous region");
412 size_t i = index();
413 ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
414 while (!r->is_humongous_start()) {
415 assert(i > 0, "Sanity");
416 i--;
417 r = heap->get_region(i);
418 assert(r->is_humongous(), "Must be a part of the humongous region");
419 }
420 assert(r->is_humongous_start(), "Must be");
421 return r;
422 }
423
// Reset the region for reuse: drop top and the update watermark back to
// bottom, clear liveness and allocation stats, reset TAMS, and transition
// _trash -> _empty_committed. Mangles memory in debug runs.
void ShenandoahHeapRegion::recycle() {
  set_top(bottom());
  clear_live_data();

  reset_alloc_metadata();

  ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();

  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}
439
440 HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
441 assert(MemRegion(bottom(), end()).contains(p),
442 "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
443 p2i(p), p2i(bottom()), p2i(end()));
444 if (p >= top()) {
445 return top();
446 } else {
447 HeapWord* last = bottom();
448 HeapWord* cur = last;
449 while (cur <= p) {
450 last = cur;
451 cur += cast_to_oop(cur)->size();
452 }
453 shenandoah_assert_correct(NULL, cast_to_oop(last));
664 evt.set_used(used());
665 evt.set_from(_state);
666 evt.set_to(to);
667 evt.commit();
668 }
669 _state = to;
670 }
671
672 void ShenandoahHeapRegion::record_pin() {
673 Atomic::add(&_critical_pins, (size_t)1);
674 }
675
676 void ShenandoahHeapRegion::record_unpin() {
677 assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
678 Atomic::sub(&_critical_pins, (size_t)1);
679 }
680
// Current number of critical pins, read atomically (may be updated
// concurrently by mutator threads).
size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}
|
1 /*
2 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/space.inline.hpp"
27 #include "gc/shared/tlab_globals.hpp"
28 #include "gc/shenandoah/shenandoahCardTable.hpp"
29 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
32 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
33 #include "gc/shenandoah/shenandoahGeneration.hpp"
34 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
36 #include "jfr/jfrEvents.hpp"
37 #include "memory/allocation.hpp"
38 #include "memory/iterator.inline.hpp"
39 #include "memory/resourceArea.hpp"
40 #include "memory/universe.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "runtime/atomic.hpp"
43 #include "runtime/globals_extension.hpp"
44 #include "runtime/java.hpp"
45 #include "runtime/mutexLocker.hpp"
46 #include "runtime/os.hpp"
47 #include "runtime/safepoint.hpp"
48 #include "utilities/powerOfTwo.hpp"
49
50
// Region geometry and derived thresholds. These are computed once during
// heap initialization and remain constant afterwards; zero means
// "not yet initialized".
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
62
// Construct region metadata for the heap slice [start, start + RegionSizeWords).
// A new region starts empty, with FREE generational affiliation and age 0;
// "committed" records whether its backing memory is already committed.
// In debug runs with ZapUnusedHeapArea, committed memory is mangled.
ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(NULL),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _plab_allocs(0),
  _has_young_lab(false),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start),
  _affiliation(FREE),
  _age(0) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}
87
88 void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
89 ResourceMark rm;
90 stringStream ss;
91 ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
92 print_on(&ss);
93 fatal("%s", ss.as_string());
94 }
95
// Transition this region into the regular (mutator-allocatable) state with
// the given generational affiliation (YOUNG/OLD). Caller must hold the heap
// lock. Resets the region age; commits empty regions on demand.
void ShenandoahHeapRegion::make_regular_allocation(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fallthrough.
    case _empty_committed:
      set_affiliation(affiliation);
      set_state(_regular);
      // Fallthrough.
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}
112
// Force this region into the regular YOUNG state, bypassing normal
// transition rules. Only legal during full or degenerated GC, which may
// repurpose cset and humongous regions wholesale during compaction.
void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fallthrough.
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      // TODO: Changing this region to young during compaction may not be
      // technically correct here because it completely disregards the ages
      // and origins of the objects being moved. It is, however, certainly
      // more correct than putting live objects into a region without a
      // generational affiliation.
      set_affiliation(YOUNG_GENERATION);
      set_state(_regular);
      return;
    case _pinned_cset:
      // Stays pinned, but no longer belongs to the collection set.
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}
143
// Transition an empty region to the head region of a humongous object.
// Caller must hold the heap lock. Resets region age; commits on demand.
void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fallthrough.
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}
157
// Force this region into the humongous-start state with the given
// generational affiliation, bypassing normal transition rules. Only legal
// during full GC compaction.
void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}
174
// Transition an empty region to a continuation region of a humongous
// object. Caller must hold the heap lock. Resets region age.
void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fallthrough.
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}
188
// Force this region into the humongous-continuation state with the given
// generational affiliation, bypassing normal transition rules. Only legal
// during full GC compaction.
void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}
205
// Transition to a pinned state so the region cannot be moved or reclaimed
// while critical pins (e.g. JNI critical sections) are held. Requires at
// least one recorded pin; already-pinned regions are left untouched.
void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
      // Fallthrough.
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
      // Fallthrough.
    case _pinned_humongous_start:
      return;
    case _cset:
      // NOTE(review): direct assignment bypasses set_state() (and thus the
      // JFR state-change event emitted there) — presumably deliberate for
      // cset pinning; confirm before changing.
      _state = _pinned_cset;
      return;
    default:
      report_illegal_transition("pinning");
  }
}
227
// Drop the pinned state once all pins are released, restoring the state
// the region had before it was pinned. Requires zero recorded pins.
void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      assert(affiliation() != FREE, "Pinned region should not be FREE");
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      // Not currently pinned; nothing to undo.
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}
250
// Mark this region as a member of the collection set (its live objects
// will be evacuated). No-op when already in the cset.
void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  // Leave age untouched. We need to consult the age when we are deciding whether to promote evacuated objects.
  switch (_state) {
    case _regular:
      set_state(_cset);
      // Fallthrough.
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}
263
// Transition the region to trash: its contents are garbage and the space
// can be recycled. Resets region age. Legal from cset, humongous, and
// regular states.
void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}
281
// Trash a region that is known to contain no marked objects (immediate
// garbage), additionally updating the marking context so a bitmap reset
// for this region can be skipped. Requires completed marking.
void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // tell marking context about it to bypass bitmap resets.
  assert(ShenandoahHeap::heap()->active_generation()->is_mark_complete(), "Marking should be complete here.");
  ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}
290
// Recycle a trashed region into the empty-committed (free) state, reset
// its age, and timestamp it; the timestamp drives uncommit heuristics.
void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}
303
304 void ShenandoahHeapRegion::make_uncommitted() {
305 shenandoah_assert_heaplocked();
306 switch (_state) {
307 case _empty_committed:
308 do_uncommit();
309 set_state(_empty_uncommitted);
310 return;
311 default:
312 report_illegal_transition("uncommiting");
313 }
314 }
315
// Force commit of an uncommitted empty region, bypassing normal
// transition rules. Only legal during full GC, which may need every
// region committed for sliding compaction.
void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}
329
330 void ShenandoahHeapRegion::reset_alloc_metadata() {
331 _tlab_allocs = 0;
332 _gclab_allocs = 0;
333 _plab_allocs = 0;
334 }
335
336 size_t ShenandoahHeapRegion::get_shared_allocs() const {
337 return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
338 }
339
340 size_t ShenandoahHeapRegion::get_tlab_allocs() const {
341 return _tlab_allocs * HeapWordSize;
342 }
343
344 size_t ShenandoahHeapRegion::get_gclab_allocs() const {
345 return _gclab_allocs * HeapWordSize;
346 }
347
348 size_t ShenandoahHeapRegion::get_plab_allocs() const {
349 return _plab_allocs * HeapWordSize;
350 }
351
// Set the amount of live data (argument in bytes; stored in words).
// Non-atomic store, hence restricted to the VM thread at a safepoint.
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}
356
357 void ShenandoahHeapRegion::print_on(outputStream* st) const {
358 st->print("|");
359 st->print(SIZE_FORMAT_W(5), this->_index);
360
361 switch (_state) {
362 case _empty_uncommitted:
363 st->print("|EU ");
364 break;
365 case _empty_committed:
366 st->print("|EC ");
367 break;
368 case _regular:
369 st->print("|R ");
370 break;
371 case _humongous_start:
375 st->print("|HP ");
376 break;
377 case _humongous_cont:
378 st->print("|HC ");
379 break;
380 case _cset:
381 st->print("|CS ");
382 break;
383 case _trash:
384 st->print("|T ");
385 break;
386 case _pinned:
387 st->print("|P ");
388 break;
389 case _pinned_cset:
390 st->print("|CSP");
391 break;
392 default:
393 ShouldNotReachHere();
394 }
395 switch (_affiliation) {
396 case ShenandoahRegionAffiliation::FREE:
397 st->print("|F");
398 break;
399 case ShenandoahRegionAffiliation::YOUNG_GENERATION:
400 st->print("|Y");
401 break;
402 case ShenandoahRegionAffiliation::OLD_GENERATION:
403 st->print("|O");
404 break;
405 default:
406 ShouldNotReachHere();
407 }
408 st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
409 p2i(bottom()), p2i(top()), p2i(end()));
410 st->print("|TAMS " INTPTR_FORMAT_W(12),
411 p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
412 st->print("|UWM " INTPTR_FORMAT_W(12),
413 p2i(_update_watermark));
414 st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
415 st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs()));
416 st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs()));
417 if (ShenandoahHeap::heap()->mode()->is_generational()) {
418 st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()), proper_unit_for_byte_size(get_plab_allocs()));
419 }
420 st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs()));
421 st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
422 st->print("|CP " SIZE_FORMAT_W(3), pin_count());
423 st->cr();
424 }
425
// Coalesce-and-fill without cancellation checks: walk objects below TAMS,
// replacing runs of unmarked (dead) objects with filler objects and
// updating the remembered set accordingly. Always returns true (never
// preempted). Requires completed marking for the active generation.
bool ShenandoahHeapRegion::oop_fill_and_coalesce_wo_cancel() {
  // Resume from wherever a previous (preemptible) pass left off.
  HeapWord* obj_addr = resume_coalesce_and_fill();

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  // All objects above TAMS are considered live even though their mark bits will not be set. Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing. These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != NULL, "klass should not be NULL");
      obj_addr += obj->size();
    } else {
      // Object is not marked. Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      // Keep the remembered set consistent with the newly coalesced range.
      heap->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}
466
// Preemptible coalesce-and-fill: same as oop_fill_and_coalesce_wo_cancel(),
// but periodically checks for GC cancellation. Returns true if the region
// was fully processed; false if preempted, in which case progress is saved
// via suspend_coalesce_and_fill() so a later call can resume.
bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
  HeapWord* obj_addr = resume_coalesce_and_fill();
  // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
  const size_t preemption_stride = 128;

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  // All objects above TAMS are considered live even though their mark bits will not be set. Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing. These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->active_generation()->is_mark_complete(), "sanity");

  size_t ops_before_preempt_check = preemption_stride;
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != NULL, "klass should not be NULL");
      obj_addr += obj->size();
    } else {
      // Object is not marked. Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      // Keep the remembered set consistent with the newly coalesced range.
      heap->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
    if (ops_before_preempt_check-- == 0) {
      // Yield to a cancellation request; remember where to resume.
      if (heap->cancelled_gc()) {
        suspend_coalesce_and_fill(obj_addr);
        return false;
      }
      ops_before_preempt_check = preemption_stride;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}
518
519 void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) {
520 if (!is_active()) return;
521 if (is_humongous()) {
522 // No need to fill dead within humongous regions. Either the entire region is dead, or the entire region is
523 // unchanged. A humongous region holds no more than one humongous object.
524 oop_iterate_humongous(blk);
525 } else {
526 global_oop_iterate_objects_and_fill_dead(blk);
527 }
528 }
529
// Iterate all live objects of this (non-humongous) region with blk, and replace
// each maximal run of dead (unmarked) objects below TAMS with a single filler
// object so the region stays parseable. Each coalesced range is also reported to
// the remembered-set scanner so stale object-start entries are unregistered.
// Requires that marking of the active generation is complete.
void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) {
  assert(!is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom();

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  RememberedScanner* rem_set_scanner = heap->card_scan();
  // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Mark bits are only trustworthy once marking has finished.
  assert(heap->active_generation()->is_mark_complete(), "sanity");

  // First pass: walk [bottom, TAMS). Marked objects are iterated; each run of
  // unmarked objects is collapsed into one filler object and unregistered from
  // the remembered set (except the first object of the run).
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != NULL, "klass should not be NULL");
      // when promoting an entire region, we have to register the marked objects as well
      obj_addr += obj->oop_iterate_size(blk);
    } else {
      // Object is not marked. Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);

      // coalesce_objects() unregisters all but first object subsumed within coalesced range.
      rem_set_scanner->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
  }

  // Any object above TAMS and below top() is considered live.
  t = top();
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk);
  }
}
568
569 void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
570 assert(is_humongous(), "only humongous region here");
571 // Find head.
572 ShenandoahHeapRegion* r = humongous_start_region();
573 assert(r->is_humongous_start(), "need humongous head here");
574 oop obj = cast_to_oop(r->bottom());
575 obj->oop_iterate(blk, MemRegion(bottom(), top()));
576 }
577
578 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
579 ShenandoahHeap* heap = ShenandoahHeap::heap();
580 assert(is_humongous(), "Must be a part of the humongous region");
581 size_t i = index();
582 ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
583 while (!r->is_humongous_start()) {
584 assert(i > 0, "Sanity");
585 i--;
586 r = heap->get_region(i);
587 assert(r->is_humongous(), "Must be a part of the humongous region");
588 }
589 assert(r->is_humongous_start(), "Must be");
590 return r;
591 }
592
593 void ShenandoahHeapRegion::recycle() {
594 ShenandoahHeap* heap = ShenandoahHeap::heap();
595
596 if (affiliation() == YOUNG_GENERATION) {
597 heap->young_generation()->decrease_used(used());
598 } else if (affiliation() == OLD_GENERATION) {
599 heap->old_generation()->decrease_used(used());
600 }
601
602 set_top(bottom());
603 clear_live_data();
604
605 reset_alloc_metadata();
606
607 heap->marking_context()->reset_top_at_mark_start(this);
608 set_update_watermark(bottom());
609
610 make_empty();
611 set_affiliation(FREE);
612
613 if (ZapUnusedHeapArea) {
614 SpaceMangler::mangle_region(MemRegion(bottom(), end()));
615 }
616 }
617
618 HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
619 assert(MemRegion(bottom(), end()).contains(p),
620 "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
621 p2i(p), p2i(bottom()), p2i(end()));
622 if (p >= top()) {
623 return top();
624 } else {
625 HeapWord* last = bottom();
626 HeapWord* cur = last;
627 while (cur <= p) {
628 last = cur;
629 cur += cast_to_oop(cur)->size();
630 }
631 shenandoah_assert_correct(NULL, cast_to_oop(last));
842 evt.set_used(used());
843 evt.set_from(_state);
844 evt.set_to(to);
845 evt.commit();
846 }
847 _state = to;
848 }
849
850 void ShenandoahHeapRegion::record_pin() {
851 Atomic::add(&_critical_pins, (size_t)1);
852 }
853
854 void ShenandoahHeapRegion::record_unpin() {
855 assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
856 Atomic::sub(&_critical_pins, (size_t)1);
857 }
858
// Lock-free read of the critical-pin counter maintained by record_pin()/record_unpin().
size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}
862
863 void ShenandoahHeapRegion::set_affiliation(ShenandoahRegionAffiliation new_affiliation) {
864 ShenandoahHeap* heap = ShenandoahHeap::heap();
865
866 {
867 ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
868 log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
869 ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
870 index(), affiliation_name(_affiliation), affiliation_name(new_affiliation),
871 p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
872 }
873
874 #ifdef ASSERT
875 {
876 // During full gc, heap->complete_marking_context() is not valid, may equal nullptr.
877 ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
878 size_t idx = this->index();
879 HeapWord* top_bitmap = ctx->top_bitmap(this);
880
881 assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
882 "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
883 p2i(top_bitmap), p2i(_end));
884 }
885 #endif
886
887 if (_affiliation == new_affiliation) {
888 return;
889 }
890
891 if (!heap->mode()->is_generational()) {
892 _affiliation = new_affiliation;
893 return;
894 }
895
896 log_trace(gc)("Changing affiliation of region %zu from %s to %s",
897 index(), affiliation_name(_affiliation), affiliation_name(new_affiliation));
898
899 if (_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
900 heap->young_generation()->decrement_affiliated_region_count();
901 } else if (_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
902 heap->old_generation()->decrement_affiliated_region_count();
903 }
904
905 switch (new_affiliation) {
906 case FREE:
907 assert(!has_live(), "Free region should not have live data");
908 break;
909 case YOUNG_GENERATION:
910 reset_age();
911 heap->young_generation()->increment_affiliated_region_count();
912 break;
913 case OLD_GENERATION:
914 heap->old_generation()->increment_affiliated_region_count();
915 break;
916 default:
917 ShouldNotReachHere();
918 return;
919 }
920 _affiliation = new_affiliation;
921 }
922
// Promote the humongous object starting in this (young, sufficiently-aged) region
// to the old generation: flip the affiliation of this region and every spanned
// continuation region to OLD_GENERATION, move the used-byte tallies from young to
// old, re-register the object with the remembered-set scanner, and mark the
// object's card range clean (primitive arrays hold no oops) or dirty (everything
// else must be scanned). Returns the number of regions promoted.
// NOTE(review): callers appear responsible for ensuring marking is complete and
// the object is live — both are asserted here, not handled.
size_t ShenandoahHeapRegion::promote_humongous() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  assert(is_young(), "Only young regions can be promoted");
  assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
  assert(age() >= InitialTenuringThreshold, "Only promote regions that are sufficiently aged");

  ShenandoahGeneration* old_generation = heap->old_generation();
  ShenandoahGeneration* young_generation = heap->young_generation();

  oop obj = cast_to_oop(bottom());
  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");

  // Number of regions the object spans, counted from this start region.
  size_t spanned_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize);
  size_t index_limit = index() + spanned_regions;

  log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words());
  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
  heap->card_scan()->register_object_wo_lock(bottom());

  // For this region and each humongous continuation region spanned by this humongous object, change
  // affiliation to OLD_GENERATION and adjust the generation-use tallies. The remnant of memory
  // in the last humongous region that is not spanned by obj is currently not used.
  for (size_t i = index(); i < index_limit; i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
                  r->index(), p2i(r->bottom()), p2i(r->top()));
    // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
    r->set_affiliation(OLD_GENERATION);
    old_generation->increase_used(r->used());
    young_generation->decrease_used(r->used());
  }
  if (obj->is_typeArray()) {
    // Primitive arrays don't need to be scanned. See above TODO question about requiring
    // region promotion at safepoint.
    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
    heap->card_scan()->mark_range_as_clean(bottom(), obj->size());
  } else {
    // Object may contain old->young pointers after promotion; dirty its cards so
    // the remembered-set scan revisits it.
    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
    heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
  }
  // Equals spanned_regions: the count of regions promoted.
  return index_limit - index();
}
|