
src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp

  1 /*
  2  * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/shared/space.inline.hpp"
 27 #include "gc/shared/tlab_globals.hpp"
 28 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 30 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
 31 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 32 #include "jfr/jfrEvents.hpp"
 33 #include "memory/allocation.hpp"
 34 #include "memory/iterator.inline.hpp"
 35 #include "memory/resourceArea.hpp"
 36 #include "memory/universe.hpp"
 37 #include "oops/oop.inline.hpp"
 38 #include "runtime/atomic.hpp"
 39 #include "runtime/globals_extension.hpp"
 40 #include "runtime/java.hpp"
 41 #include "runtime/mutexLocker.hpp"
 42 #include "runtime/os.hpp"
 43 #include "runtime/safepoint.hpp"
 44 #include "utilities/powerOfTwo.hpp"
 45 
 46 size_t ShenandoahHeapRegion::RegionCount = 0;
 47 size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
 48 size_t ShenandoahHeapRegion::RegionSizeWords = 0;
 49 size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
 50 size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
 51 size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
 52 size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
 53 size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
 54 size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
 55 size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
 56 size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
 57 
 58 ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
 59   _index(index),
 60   _bottom(start),
 61   _end(start + RegionSizeWords),
 62   _new_top(NULL),
 63   _empty_time(os::elapsedTime()),
 64   _state(committed ? _empty_committed : _empty_uncommitted),
 65   _top(start),
 66   _tlab_allocs(0),
 67   _gclab_allocs(0),
 68   _live_data(0),
 69   _critical_pins(0),
 70   _update_watermark(start) {
 71 
 72   assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
 73          "invalid space boundaries");
 74   if (ZapUnusedHeapArea && committed) {
 75     SpaceMangler::mangle_region(MemRegion(_bottom, _end));
 76   }
 77 }
 78 
 79 void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
 80   ResourceMark rm;
 81   stringStream ss;
 82   ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
 83   print_on(&ss);
 84   fatal("%s", ss.as_string());
 85 }
 86 
 87 void ShenandoahHeapRegion::make_regular_allocation() {
 88   shenandoah_assert_heaplocked();
 89 
 90   switch (_state) {
 91     case _empty_uncommitted:
 92       do_commit();
 93     case _empty_committed:
 94       set_state(_regular);
 95     case _regular:
 96     case _pinned:
 97       return;
 98     default:
 99       report_illegal_transition("regular allocation");
100   }
101 }
102 
103 void ShenandoahHeapRegion::make_regular_bypass() {
104   shenandoah_assert_heaplocked();
105   assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
106           "only for full or degen GC");
107 
108   switch (_state) {
109     case _empty_uncommitted:
110       do_commit();
111     case _empty_committed:
112     case _cset:
113     case _humongous_start:
114     case _humongous_cont:
115       set_state(_regular);
116       return;
117     case _pinned_cset:
118       set_state(_pinned);
119       return;
120     case _regular:
121     case _pinned:
122       return;
123     default:
124       report_illegal_transition("regular bypass");
125   }
126 }
127 
128 void ShenandoahHeapRegion::make_humongous_start() {
129   shenandoah_assert_heaplocked();
130   switch (_state) {
131     case _empty_uncommitted:
132       do_commit();
133     case _empty_committed:
134       set_state(_humongous_start);
135       return;
136     default:
137       report_illegal_transition("humongous start allocation");
138   }
139 }
140 
141 void ShenandoahHeapRegion::make_humongous_start_bypass() {
142   shenandoah_assert_heaplocked();
143   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
144 
145   switch (_state) {
146     case _empty_committed:
147     case _regular:
148     case _humongous_start:
149     case _humongous_cont:
150       set_state(_humongous_start);
151       return;
152     default:
153       report_illegal_transition("humongous start bypass");
154   }
155 }
156 
157 void ShenandoahHeapRegion::make_humongous_cont() {
158   shenandoah_assert_heaplocked();
159   switch (_state) {
160     case _empty_uncommitted:
161       do_commit();
162     case _empty_committed:
163       set_state(_humongous_cont);
164       return;
165     default:
166       report_illegal_transition("humongous continuation allocation");
167   }
168 }
169 
170 void ShenandoahHeapRegion::make_humongous_cont_bypass() {
171   shenandoah_assert_heaplocked();
172   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
173 
174   switch (_state) {
175     case _empty_committed:
176     case _regular:
177     case _humongous_start:
178     case _humongous_cont:
179       set_state(_humongous_cont);
180       return;
181     default:
182       report_illegal_transition("humongous continuation bypass");
183   }
184 }
185 
186 void ShenandoahHeapRegion::make_pinned() {
187   shenandoah_assert_heaplocked();
188   assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());
189 
190   switch (_state) {
191     case _regular:
192       set_state(_pinned);
193     case _pinned_cset:
194     case _pinned:
195       return;
196     case _humongous_start:
197       set_state(_pinned_humongous_start);
198     case _pinned_humongous_start:
199       return;
200     case _cset:
201       _state = _pinned_cset;
202       return;
203     default:
204       report_illegal_transition("pinning");
205   }
206 }
207 
208 void ShenandoahHeapRegion::make_unpinned() {
209   shenandoah_assert_heaplocked();
210   assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());
211 
212   switch (_state) {
213     case _pinned:
214       set_state(_regular);
215       return;
216     case _regular:
217     case _humongous_start:
218       return;
219     case _pinned_cset:
220       set_state(_cset);
221       return;
222     case _pinned_humongous_start:
223       set_state(_humongous_start);
224       return;
225     default:
226       report_illegal_transition("unpinning");
227   }
228 }
229 
230 void ShenandoahHeapRegion::make_cset() {
231   shenandoah_assert_heaplocked();
232   switch (_state) {
233     case _regular:
234       set_state(_cset);
235     case _cset:
236       return;
237     default:
238       report_illegal_transition("cset");
239   }
240 }
241 
242 void ShenandoahHeapRegion::make_trash() {
243   shenandoah_assert_heaplocked();
244   switch (_state) {
245     case _cset:
246       // Reclaiming cset regions
247     case _humongous_start:
248     case _humongous_cont:
249       // Reclaiming humongous regions
250     case _regular:
251       // Immediate region reclaim
252       set_state(_trash);
253       return;
254     default:
255       report_illegal_transition("trashing");
256   }
257 }
258 
259 void ShenandoahHeapRegion::make_trash_immediate() {
260   make_trash();
261 
262   // On this path, we know there are no marked objects in the region,
263   // tell marking context about it to bypass bitmap resets.
264   ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this);
265 }
266 
267 void ShenandoahHeapRegion::make_empty() {
268   shenandoah_assert_heaplocked();
269   switch (_state) {
270     case _trash:
271       set_state(_empty_committed);
272       _empty_time = os::elapsedTime();
273       return;
274     default:
275       report_illegal_transition("emptying");
276   }
277 }
278 
279 void ShenandoahHeapRegion::make_uncommitted() {
280   shenandoah_assert_heaplocked();
281   switch (_state) {
282     case _empty_committed:
283       do_uncommit();
284       set_state(_empty_uncommitted);
285       return;
286     default:
287       report_illegal_transition("uncommiting");
288   }
289 }
290 
291 void ShenandoahHeapRegion::make_committed_bypass() {
292   shenandoah_assert_heaplocked();
293   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
294 
295   switch (_state) {
296     case _empty_uncommitted:
297       do_commit();
298       set_state(_empty_committed);
299       return;
300     default:
301       report_illegal_transition("commit bypass");
302   }
303 }
304 
305 void ShenandoahHeapRegion::reset_alloc_metadata() {
306   _tlab_allocs = 0;
307   _gclab_allocs = 0;
308 }
309 
310 size_t ShenandoahHeapRegion::get_shared_allocs() const {
311   return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize;
312 }
313 
314 size_t ShenandoahHeapRegion::get_tlab_allocs() const {
315   return _tlab_allocs * HeapWordSize;
316 }
317 
318 size_t ShenandoahHeapRegion::get_gclab_allocs() const {
319   return _gclab_allocs * HeapWordSize;
320 }
321 
322 void ShenandoahHeapRegion::set_live_data(size_t s) {
323   assert(Thread::current()->is_VM_thread(), "by VM thread");
324   _live_data = (s >> LogHeapWordSize);
325 }
326 
327 void ShenandoahHeapRegion::print_on(outputStream* st) const {
328   st->print("|");
329   st->print(SIZE_FORMAT_W(5), this->_index);
330 
331   switch (_state) {
332     case _empty_uncommitted:
333       st->print("|EU ");
334       break;
335     case _empty_committed:
336       st->print("|EC ");
337       break;
338     case _regular:
339       st->print("|R  ");
340       break;
341     case _humongous_start:
342       st->print("|H  ");
343       break;
344     case _pinned_humongous_start:
345       st->print("|HP ");
346       break;
347     case _humongous_cont:
348       st->print("|HC ");
349       break;
350     case _cset:
351       st->print("|CS ");
352       break;
353     case _trash:
354       st->print("|T  ");
355       break;
356     case _pinned:
357       st->print("|P  ");
358       break;
359     case _pinned_cset:
360       st->print("|CSP");
361       break;
362     default:
363       ShouldNotReachHere();
364   }
365   st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
366             p2i(bottom()), p2i(top()), p2i(end()));
367   st->print("|TAMS " INTPTR_FORMAT_W(12),
368             p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
369   st->print("|UWM " INTPTR_FORMAT_W(12),
370             p2i(_update_watermark));
371   st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
372   st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
373   st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
374   st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
375   st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
376   st->print("|CP " SIZE_FORMAT_W(3), pin_count());
377   st->cr();
378 }
379 
380 void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) {
381   if (!is_active()) return;
382   if (is_humongous()) {
383     oop_iterate_humongous(blk);
384   } else {
385     oop_iterate_objects(blk);
386   }
387 }
388 
389 void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) {
390   assert(! is_humongous(), "no humongous region here");
391   HeapWord* obj_addr = bottom();
392   HeapWord* t = top();
393   // Could call objects iterate, but this is easier.
394   while (obj_addr < t) {
395     oop obj = cast_to_oop(obj_addr);
396     obj_addr += obj->oop_iterate_size(blk);
397   }
398 }
399 
400 void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
401   assert(is_humongous(), "only humongous region here");
402   // Find head.
403   ShenandoahHeapRegion* r = humongous_start_region();
404   assert(r->is_humongous_start(), "need humongous head here");
405   oop obj = cast_to_oop(r->bottom());
406   obj->oop_iterate(blk, MemRegion(bottom(), top()));
407 }
408 
409 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
410   ShenandoahHeap* heap = ShenandoahHeap::heap();
411   assert(is_humongous(), "Must be a part of the humongous region");
412   size_t i = index();
413   ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
414   while (!r->is_humongous_start()) {
415     assert(i > 0, "Sanity");
416     i--;
417     r = heap->get_region(i);
418     assert(r->is_humongous(), "Must be a part of the humongous region");
419   }
420   assert(r->is_humongous_start(), "Must be");
421   return r;
422 }
423 
424 void ShenandoahHeapRegion::recycle() {
425   set_top(bottom());
426   clear_live_data();
427 
428   reset_alloc_metadata();
429 
430   ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this);
431   set_update_watermark(bottom());
432 
433   make_empty();
434 
435   if (ZapUnusedHeapArea) {
436     SpaceMangler::mangle_region(MemRegion(bottom(), end()));
437   }
438 }
439 
440 HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
441   assert(MemRegion(bottom(), end()).contains(p),
442          "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
443          p2i(p), p2i(bottom()), p2i(end()));
444   if (p >= top()) {
445     return top();
446   } else {
447     HeapWord* last = bottom();
448     HeapWord* cur = last;
449     while (cur <= p) {
450       last = cur;
451       cur += cast_to_oop(cur)->size();
452     }
453     shenandoah_assert_correct(NULL, cast_to_oop(last));

664     evt.set_used(used());
665     evt.set_from(_state);
666     evt.set_to(to);
667     evt.commit();
668   }
669   _state = to;
670 }
671 
672 void ShenandoahHeapRegion::record_pin() {
673   Atomic::add(&_critical_pins, (size_t)1);
674 }
675 
676 void ShenandoahHeapRegion::record_unpin() {
677   assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
678   Atomic::sub(&_critical_pins, (size_t)1);
679 }
680 
681 size_t ShenandoahHeapRegion::pin_count() const {
682   return Atomic::load(&_critical_pins);
683 }
  1 /*
  2  * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/shared/space.inline.hpp"
 27 #include "gc/shared/tlab_globals.hpp"
 28 #include "gc/shenandoah/shenandoahCardTable.hpp"
 29 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
 32 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 33 #include "gc/shenandoah/shenandoahGeneration.hpp"
 34 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 36 #include "jfr/jfrEvents.hpp"
 37 #include "memory/allocation.hpp"
 38 #include "memory/iterator.inline.hpp"
 39 #include "memory/resourceArea.hpp"
 40 #include "memory/universe.hpp"
 41 #include "oops/oop.inline.hpp"
 42 #include "runtime/atomic.hpp"
 43 #include "runtime/globals_extension.hpp"
 44 #include "runtime/java.hpp"
 45 #include "runtime/mutexLocker.hpp"
 46 #include "runtime/os.hpp"
 47 #include "runtime/safepoint.hpp"
 48 #include "utilities/powerOfTwo.hpp"
 49 
 50 
 51 size_t ShenandoahHeapRegion::RegionCount = 0;
 52 size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
 53 size_t ShenandoahHeapRegion::RegionSizeWords = 0;
 54 size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
 55 size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
 56 size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
 57 size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
 58 size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
 59 size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
 60 size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
 61 size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
 62 
 63 ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
 64   _index(index),
 65   _bottom(start),
 66   _end(start + RegionSizeWords),
 67   _new_top(NULL),
 68   _empty_time(os::elapsedTime()),
 69   _state(committed ? _empty_committed : _empty_uncommitted),
 70   _top(start),
 71   _tlab_allocs(0),
 72   _gclab_allocs(0),
 73   _plab_allocs(0),
 74   _has_young_lab(false),
 75   _live_data(0),
 76   _critical_pins(0),
 77   _update_watermark(start),
 78   _affiliation(FREE),
 79   _age(0) {
 80 
 81   assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
 82          "invalid space boundaries");
 83   if (ZapUnusedHeapArea && committed) {
 84     SpaceMangler::mangle_region(MemRegion(_bottom, _end));
 85   }
 86 }
 87 
 88 void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
 89   ResourceMark rm;
 90   stringStream ss;
 91   ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
 92   print_on(&ss);
 93   fatal("%s", ss.as_string());
 94 }
 95 
 96 void ShenandoahHeapRegion::make_regular_allocation(ShenandoahRegionAffiliation affiliation) {
 97   shenandoah_assert_heaplocked();
 98   reset_age();
 99   switch (_state) {
100     case _empty_uncommitted:
101       do_commit();
102     case _empty_committed:
103       set_affiliation(affiliation);
104       set_state(_regular);
105     case _regular:
106     case _pinned:
107       return;
108     default:
109       report_illegal_transition("regular allocation");
110   }
111 }
112 
113 void ShenandoahHeapRegion::make_regular_bypass() {
114   shenandoah_assert_heaplocked();
115   assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
116           "only for full or degen GC");
117   reset_age();
118   switch (_state) {
119     case _empty_uncommitted:
120       do_commit();
121     case _empty_committed:
122     case _cset:
123     case _humongous_start:
124     case _humongous_cont:
125       // TODO: Changing this region to young during compaction may not be
126       // technically correct here because it completely disregards the ages
127       // and origins of the objects being moved. It is, however, certainly
128       // more correct than putting live objects into a region without a
129       // generational affiliation.
130       set_affiliation(YOUNG_GENERATION);
131       set_state(_regular);
132       return;
133     case _pinned_cset:
134       set_state(_pinned);
135       return;
136     case _regular:
137     case _pinned:
138       return;
139     default:
140       report_illegal_transition("regular bypass");
141   }
142 }
143 
144 void ShenandoahHeapRegion::make_humongous_start() {
145   shenandoah_assert_heaplocked();
146   reset_age();
147   switch (_state) {
148     case _empty_uncommitted:
149       do_commit();
150     case _empty_committed:
151       set_state(_humongous_start);
152       return;
153     default:
154       report_illegal_transition("humongous start allocation");
155   }
156 }
157 
158 void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahRegionAffiliation affiliation) {
159   shenandoah_assert_heaplocked();
160   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
161   set_affiliation(affiliation);
162   reset_age();
163   switch (_state) {
164     case _empty_committed:
165     case _regular:
166     case _humongous_start:
167     case _humongous_cont:
168       set_state(_humongous_start);
169       return;
170     default:
171       report_illegal_transition("humongous start bypass");
172   }
173 }
174 
175 void ShenandoahHeapRegion::make_humongous_cont() {
176   shenandoah_assert_heaplocked();
177   reset_age();
178   switch (_state) {
179     case _empty_uncommitted:
180       do_commit();
181     case _empty_committed:
182       set_state(_humongous_cont);
183       return;
184     default:
185       report_illegal_transition("humongous continuation allocation");
186   }
187 }
188 
189 void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahRegionAffiliation affiliation) {
190   shenandoah_assert_heaplocked();
191   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
192   set_affiliation(affiliation);
193   reset_age();
194   switch (_state) {
195     case _empty_committed:
196     case _regular:
197     case _humongous_start:
198     case _humongous_cont:
199       set_state(_humongous_cont);
200       return;
201     default:
202       report_illegal_transition("humongous continuation bypass");
203   }
204 }
205 
206 void ShenandoahHeapRegion::make_pinned() {
207   shenandoah_assert_heaplocked();
208   assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());
209 
210   switch (_state) {
211     case _regular:
212       set_state(_pinned);
213     case _pinned_cset:
214     case _pinned:
215       return;
216     case _humongous_start:
217       set_state(_pinned_humongous_start);
218     case _pinned_humongous_start:
219       return;
220     case _cset:
221       _state = _pinned_cset;
222       return;
223     default:
224       report_illegal_transition("pinning");
225   }
226 }
227 
228 void ShenandoahHeapRegion::make_unpinned() {
229   shenandoah_assert_heaplocked();
230   assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());
231 
232   switch (_state) {
233     case _pinned:
234       assert(affiliation() != FREE, "Pinned region should not be FREE");
235       set_state(_regular);
236       return;
237     case _regular:
238     case _humongous_start:
239       return;
240     case _pinned_cset:
241       set_state(_cset);
242       return;
243     case _pinned_humongous_start:
244       set_state(_humongous_start);
245       return;
246     default:
247       report_illegal_transition("unpinning");
248   }
249 }
250 
251 void ShenandoahHeapRegion::make_cset() {
252   shenandoah_assert_heaplocked();
253   reset_age();
254   switch (_state) {
255     case _regular:
256       set_state(_cset);
257     case _cset:
258       return;
259     default:
260       report_illegal_transition("cset");
261   }
262 }
263 
264 void ShenandoahHeapRegion::make_trash() {
265   shenandoah_assert_heaplocked();
266   reset_age();
267   switch (_state) {
268     case _cset:
269       // Reclaiming cset regions
270     case _humongous_start:
271     case _humongous_cont:
272       // Reclaiming humongous regions
273     case _regular:
274       // Immediate region reclaim
275       set_state(_trash);
276       return;
277     default:
278       report_illegal_transition("trashing");
279   }
280 }
281 
282 void ShenandoahHeapRegion::make_trash_immediate() {
283   make_trash();
284 
285   // On this path, we know there are no marked objects in the region,
286   // tell marking context about it to bypass bitmap resets.
287   assert(ShenandoahHeap::heap()->active_generation()->is_mark_complete(), "Marking should be complete here.");
288   // Leave top_bitmap alone.  If it is greater than bottom(), then we still need to clear between bottom() and top_bitmap()
289   // when this FREE region is repurposed for YOUNG or OLD.
290   // ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
291 }
292 
293 void ShenandoahHeapRegion::make_empty() {
294   shenandoah_assert_heaplocked();
295   reset_age();
296   switch (_state) {
297     case _trash:
298       set_state(_empty_committed);
299       _empty_time = os::elapsedTime();
300       return;
301     default:
302       report_illegal_transition("emptying");
303   }
304 }
305 
306 void ShenandoahHeapRegion::make_uncommitted() {
307   shenandoah_assert_heaplocked();
308   switch (_state) {
309     case _empty_committed:
310       do_uncommit();
311       set_state(_empty_uncommitted);
312       return;
313     default:
314       report_illegal_transition("uncommiting");
315   }
316 }
317 
318 void ShenandoahHeapRegion::make_committed_bypass() {
319   shenandoah_assert_heaplocked();
320   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
321 
322   switch (_state) {
323     case _empty_uncommitted:
324       do_commit();
325       set_state(_empty_committed);
326       return;
327     default:
328       report_illegal_transition("commit bypass");
329   }
330 }
331 
332 void ShenandoahHeapRegion::reset_alloc_metadata() {
333   _tlab_allocs = 0;
334   _gclab_allocs = 0;
335   _plab_allocs = 0;
336 }
337 
338 size_t ShenandoahHeapRegion::get_shared_allocs() const {
339   return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
340 }
341 
342 size_t ShenandoahHeapRegion::get_tlab_allocs() const {
343   return _tlab_allocs * HeapWordSize;
344 }
345 
346 size_t ShenandoahHeapRegion::get_gclab_allocs() const {
347   return _gclab_allocs * HeapWordSize;
348 }
349 
350 size_t ShenandoahHeapRegion::get_plab_allocs() const {
351   return _plab_allocs * HeapWordSize;
352 }
353 
354 void ShenandoahHeapRegion::set_live_data(size_t s) {
355   assert(Thread::current()->is_VM_thread(), "by VM thread");
356   _live_data = (s >> LogHeapWordSize);
357 }
358 
359 void ShenandoahHeapRegion::print_on(outputStream* st) const {
360   st->print("|");
361   st->print(SIZE_FORMAT_W(5), this->_index);
362 
363   switch (_state) {
364     case _empty_uncommitted:
365       st->print("|EU ");
366       break;
367     case _empty_committed:
368       st->print("|EC ");
369       break;
370     case _regular:
371       st->print("|R  ");
372       break;
373     case _humongous_start:
374       st->print("|H  ");
375       break;
376     case _pinned_humongous_start:
377       st->print("|HP ");
378       break;
379     case _humongous_cont:
380       st->print("|HC ");
381       break;
382     case _cset:
383       st->print("|CS ");
384       break;
385     case _trash:
386       st->print("|T  ");
387       break;
388     case _pinned:
389       st->print("|P  ");
390       break;
391     case _pinned_cset:
392       st->print("|CSP");
393       break;
394     default:
395       ShouldNotReachHere();
396   }
397   switch (_affiliation) {
398     case ShenandoahRegionAffiliation::FREE:
399       st->print("|F");
400       break;
401     case ShenandoahRegionAffiliation::YOUNG_GENERATION:
402       st->print("|Y");
403       break;
404     case ShenandoahRegionAffiliation::OLD_GENERATION:
405       st->print("|O");
406       break;
407     default:
408       ShouldNotReachHere();
409   }
410   st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
411             p2i(bottom()), p2i(top()), p2i(end()));
412   st->print("|TAMS " INTPTR_FORMAT_W(12),
413             p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
414   st->print("|UWM " INTPTR_FORMAT_W(12),
415             p2i(_update_watermark));
416   st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
417   st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
418   st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
419   if (ShenandoahHeap::heap()->mode()->is_generational()) {
420     st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()),   proper_unit_for_byte_size(get_plab_allocs()));
421   }
422   st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
423   st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
424   st->print("|CP " SIZE_FORMAT_W(3), pin_count());
425   st->cr();
426 }
427 
428 // oop_iterate without closure, return true if completed without cancellation
429 bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
430   HeapWord* obj_addr = resume_coalesce_and_fill();
431   // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
432   const size_t preemption_stride = 128;
433 
434   assert(!is_humongous(), "No need to fill or coalesce humongous regions");
435   if (!is_active()) {
436     end_preemptible_coalesce_and_fill();
437     return true;
438   }
439 
440   ShenandoahHeap* heap = ShenandoahHeap::heap();
441   ShenandoahMarkingContext* marking_context = heap->marking_context();
442   // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
443   // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
444   // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
445   // and will be treated as live during the current old-gen marking pass, even though they will not be
446   // explicitly marked.
447   HeapWord* t = marking_context->top_at_mark_start(this);
448 
449   // Expect marking to be completed before these threads invoke this service.
450   assert(heap->active_generation()->is_mark_complete(), "sanity");
451 
452   size_t ops_before_preempt_check = preemption_stride;
453   while (obj_addr < t) {
454     oop obj = cast_to_oop(obj_addr);
455     if (marking_context->is_marked(obj)) {
456       assert(obj->klass() != NULL, "klass should not be NULL");
457       obj_addr += obj->size();
458     } else {
459       // Object is not marked.  Coalesce and fill dead object with dead neighbors.
460       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
461       assert(next_marked_obj <= t, "next marked object cannot exceed top");
462       size_t fill_size = next_marked_obj - obj_addr;
463       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
464       heap->card_scan()->coalesce_objects(obj_addr, fill_size);
465       obj_addr = next_marked_obj;
466     }
467     if (ops_before_preempt_check-- == 0) {
468       if (heap->cancelled_gc()) {
469         suspend_coalesce_and_fill(obj_addr);
470         return false;
471       }
472       ops_before_preempt_check = preemption_stride;
473     }
474   }
475   // Mark that this region has been coalesced and filled
476   end_preemptible_coalesce_and_fill();
477   return true;
478 }
479 
480 void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) {
481   if (!is_active()) return;
482   if (is_humongous()) {
483     // No need to fill dead within humongous regions.  Either the entire region is dead, or the entire region is
484     // unchanged.  A humongous region holds no more than one humongous object.
485     oop_iterate_humongous(blk);
486   } else {
487     global_oop_iterate_objects_and_fill_dead(blk);
488   }
489 }
490 
491 void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) {
492   assert(!is_humongous(), "no humongous region here");
493   HeapWord* obj_addr = bottom();
494 
495   ShenandoahHeap* heap = ShenandoahHeap::heap();
496   ShenandoahMarkingContext* marking_context = heap->marking_context();
497   RememberedScanner* rem_set_scanner = heap->card_scan();
498   // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts.
499   HeapWord* t = marking_context->top_at_mark_start(this);
500 
501   assert(heap->active_generation()->is_mark_complete(), "sanity");
502 
503   while (obj_addr < t) {
504     oop obj = cast_to_oop(obj_addr);
505     if (marking_context->is_marked(obj)) {
506       assert(obj->klass() != NULL, "klass should not be NULL");
507       // when promoting an entire region, we have to register the marked objects as well
508       obj_addr += obj->oop_iterate_size(blk);
509     } else {
510       // Object is not marked.  Coalesce and fill dead object with dead neighbors.
511       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
512       assert(next_marked_obj <= t, "next marked object cannot exceed top");
513       size_t fill_size = next_marked_obj - obj_addr;
514       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
515 
516       // coalesce_objects() unregisters all but first object subsumed within coalesced range.
517       rem_set_scanner->coalesce_objects(obj_addr, fill_size);
518       obj_addr = next_marked_obj;
519     }
520   }
521 
522   // Any object above TAMS and below top() is considered live.
523   t = top();
524   while (obj_addr < t) {
525     oop obj = cast_to_oop(obj_addr);
526     obj_addr += obj->oop_iterate_size(blk);
527   }
528 }
529 
530 void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
531   assert(is_humongous(), "only humongous region here");
532   // Find head.
533   ShenandoahHeapRegion* r = humongous_start_region();
534   assert(r->is_humongous_start(), "need humongous head here");
535   oop obj = cast_to_oop(r->bottom());
536   obj->oop_iterate(blk, MemRegion(bottom(), top()));
537 }
538 
539 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
540   ShenandoahHeap* heap = ShenandoahHeap::heap();
541   assert(is_humongous(), "Must be a part of the humongous region");
542   size_t i = index();
543   ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
544   while (!r->is_humongous_start()) {
545     assert(i > 0, "Sanity");
546     i--;
547     r = heap->get_region(i);
548     assert(r->is_humongous(), "Must be a part of the humongous region");
549   }
550   assert(r->is_humongous_start(), "Must be");
551   return r;
552 }
553 
554 void ShenandoahHeapRegion::recycle() {
555   ShenandoahHeap* heap = ShenandoahHeap::heap();
556 
557   if (affiliation() == YOUNG_GENERATION) {
558     heap->young_generation()->decrease_used(used());
559   } else if (affiliation() == OLD_GENERATION) {
560     heap->old_generation()->decrease_used(used());
561   }
562 
563   set_top(bottom());
564   clear_live_data();
565 
566   reset_alloc_metadata();
567 
568   heap->marking_context()->reset_top_at_mark_start(this);
569   set_update_watermark(bottom());
570 
571   make_empty();
572   set_affiliation(FREE);
573 
574   heap->clear_cards_for(this);
575 
576   if (ZapUnusedHeapArea) {
577     SpaceMangler::mangle_region(MemRegion(bottom(), end()));
578   }
579 }
580 
581 HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
582   assert(MemRegion(bottom(), end()).contains(p),
583          "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
584          p2i(p), p2i(bottom()), p2i(end()));
585   if (p >= top()) {
586     return top();
587   } else {
588     HeapWord* last = bottom();
589     HeapWord* cur = last;
590     while (cur <= p) {
591       last = cur;
592       cur += cast_to_oop(cur)->size();
593     }
594     shenandoah_assert_correct(NULL, cast_to_oop(last));

805     evt.set_used(used());
806     evt.set_from(_state);
807     evt.set_to(to);
808     evt.commit();
809   }
810   _state = to;
811 }
812 
813 void ShenandoahHeapRegion::record_pin() {
814   Atomic::add(&_critical_pins, (size_t)1);
815 }
816 
817 void ShenandoahHeapRegion::record_unpin() {
818   assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
819   Atomic::sub(&_critical_pins, (size_t)1);
820 }
821 
822 size_t ShenandoahHeapRegion::pin_count() const {
823   return Atomic::load(&_critical_pins);
824 }
825 
826 void ShenandoahHeapRegion::set_affiliation(ShenandoahRegionAffiliation new_affiliation) {
827   ShenandoahHeap* heap = ShenandoahHeap::heap();
828 
829   {
830     ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
831     log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
832                   ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT "\n",
833                   index(), affiliation_name(_affiliation), affiliation_name(new_affiliation),
834                   p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
835   }
836 
837 #ifdef ASSERT
838   {
839     // During full gc, heap->complete_marking_context() is not valid, may equal nullptr.
840     ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
841     size_t idx = this->index();
842     HeapWord* top_bitmap = ctx->top_bitmap(this);
843 
844     assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
845            "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
846            p2i(top_bitmap), p2i(_end));
847   }
848 #endif
849 
850   if (_affiliation == new_affiliation) {
851     return;
852   }
853 
854   if (!heap->mode()->is_generational()) {
855     _affiliation = new_affiliation;
856     return;
857   }
858 
859   log_trace(gc)("Changing affiliation of region %zu from %s to %s",
860     index(), affiliation_name(_affiliation), affiliation_name(new_affiliation));
861 
862   if (_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
863     heap->young_generation()->decrement_affiliated_region_count();
864   } else if (_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
865     heap->old_generation()->decrement_affiliated_region_count();
866   }
867 
868   switch (new_affiliation) {
869     case FREE:
870       assert(!has_live(), "Free region should not have live data");
871       break;
872     case YOUNG_GENERATION:
873       reset_age();
874       heap->young_generation()->increment_affiliated_region_count();
875       break;
876     case OLD_GENERATION:
877       heap->old_generation()->increment_affiliated_region_count();
878       break;
879     default:
880       ShouldNotReachHere();
881       return;
882   }
883   _affiliation = new_affiliation;
884 }
885 
886 size_t ShenandoahHeapRegion::promote_humongous() {
887   ShenandoahHeap* heap = ShenandoahHeap::heap();
888   ShenandoahMarkingContext* marking_context = heap->marking_context();
889   assert(heap->active_generation()->is_mark_complete(), "sanity");
890   assert(is_young(), "Only young regions can be promoted");
891   assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
892   assert(age() >= InitialTenuringThreshold, "Only promote regions that are sufficiently aged");
893 
894   ShenandoahGeneration* old_generation = heap->old_generation();
895   ShenandoahGeneration* young_generation = heap->young_generation();
896 
897   oop obj = cast_to_oop(bottom());
898   assert(marking_context->is_marked(obj), "promoted humongous object should be alive");
899 
900   size_t spanned_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize);
901   size_t index_limit = index() + spanned_regions;
902 
903   log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);
904 
905   // Since this region may have served previously as OLD, it may hold obsolete object range info.
906   heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words());
907   // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
908   heap->card_scan()->register_object_wo_lock(bottom());
909 
910   // For this region and each humongous continuation region spanned by this humongous object, change
911   // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
912   // in the last humongous region that is not spanned by obj is currently not used.
913   for (size_t i = index(); i < index_limit; i++) {
914     ShenandoahHeapRegion* r = heap->get_region(i);
915     log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
916                   r->index(), p2i(r->bottom()), p2i(r->top()));
917     // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
918     r->set_affiliation(OLD_GENERATION);
919     old_generation->increase_used(r->used());
920     young_generation->decrease_used(r->used());
921   }
922   if (obj->is_typeArray()) {
923     // Primitive arrays don't need to be scanned.  See above TODO question about requiring
924     // region promotion at safepoint.
925     log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
926                   index(), p2i(bottom()), p2i(bottom() + obj->size()));
927     heap->card_scan()->mark_range_as_clean(bottom(), obj->size());
928   } else {
929     log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
930                   index(), p2i(bottom()), p2i(bottom() + obj->size()));
931     heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
932   }
933   return index_limit - index();
934 }