src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp

@@ -1,7 +1,7 @@
  /*
-  * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+  * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.

@@ -23,14 +23,18 @@
   */
  
  #include "precompiled.hpp"
  #include "gc/shared/space.inline.hpp"
  #include "gc/shared/tlab_globals.hpp"
+ #include "gc/shenandoah/shenandoahCardTable.hpp"
  #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+ #include "gc/shenandoah/shenandoahGeneration.hpp"
+ #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
+ #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  #include "jfr/jfrEvents.hpp"
  #include "memory/allocation.hpp"
  #include "memory/iterator.inline.hpp"
  #include "memory/resourceArea.hpp"
  #include "memory/universe.hpp"

@@ -41,10 +45,11 @@
  #include "runtime/mutexLocker.hpp"
  #include "runtime/os.hpp"
  #include "runtime/safepoint.hpp"
  #include "utilities/powerOfTwo.hpp"
  
+ 
  size_t ShenandoahHeapRegion::RegionCount = 0;
  size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
  size_t ShenandoahHeapRegion::RegionSizeWords = 0;
  size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
  size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;

@@ -63,13 +68,17 @@
    _empty_time(os::elapsedTime()),
    _state(committed ? _empty_committed : _empty_uncommitted),
    _top(start),
    _tlab_allocs(0),
    _gclab_allocs(0),
+   _plab_allocs(0),
+   _has_young_lab(false),
    _live_data(0),
    _critical_pins(0),
-   _update_watermark(start) {
+   _update_watermark(start),
+   _affiliation(FREE),
+   _age(0) {
  
    assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
           "invalid space boundaries");
    if (ZapUnusedHeapArea && committed) {
      SpaceMangler::mangle_region(MemRegion(_bottom, _end));

@@ -82,17 +91,18 @@
    ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
    print_on(&ss);
    fatal("%s", ss.as_string());
  }
  
- void ShenandoahHeapRegion::make_regular_allocation() {
+ void ShenandoahHeapRegion::make_regular_allocation(ShenandoahRegionAffiliation affiliation) {
    shenandoah_assert_heaplocked();
- 
+   reset_age();
    switch (_state) {
      case _empty_uncommitted:
        do_commit();
      case _empty_committed:
+       set_affiliation(affiliation);
        set_state(_regular);
      case _regular:
      case _pinned:
        return;
      default:

@@ -102,18 +112,24 @@
  
  void ShenandoahHeapRegion::make_regular_bypass() {
    shenandoah_assert_heaplocked();
    assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
            "only for full or degen GC");
- 
+   reset_age();
    switch (_state) {
      case _empty_uncommitted:
        do_commit();
      case _empty_committed:
      case _cset:
      case _humongous_start:
      case _humongous_cont:
+       // TODO: Changing this region to young during compaction may not be
+       // technically correct here because it completely disregards the ages
+       // and origins of the objects being moved. It is, however, certainly
+       // more correct than putting live objects into a region without a
+       // generational affiliation.
+       set_affiliation(YOUNG_GENERATION);
        set_state(_regular);
        return;
      case _pinned_cset:
        set_state(_pinned);
        return;

@@ -125,10 +141,11 @@
    }
  }
  
  void ShenandoahHeapRegion::make_humongous_start() {
    shenandoah_assert_heaplocked();
+   reset_age();
    switch (_state) {
      case _empty_uncommitted:
        do_commit();
      case _empty_committed:
        set_state(_humongous_start);

@@ -136,14 +153,15 @@
      default:
        report_illegal_transition("humongous start allocation");
    }
  }
  
- void ShenandoahHeapRegion::make_humongous_start_bypass() {
+ void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahRegionAffiliation affiliation) {
    shenandoah_assert_heaplocked();
    assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
- 
+   set_affiliation(affiliation);
+   reset_age();
    switch (_state) {
      case _empty_committed:
      case _regular:
      case _humongous_start:
      case _humongous_cont:

@@ -154,10 +172,11 @@
    }
  }
  
  void ShenandoahHeapRegion::make_humongous_cont() {
    shenandoah_assert_heaplocked();
+   reset_age();
    switch (_state) {
      case _empty_uncommitted:
        do_commit();
      case _empty_committed:
       set_state(_humongous_cont);

@@ -165,14 +184,15 @@
      default:
        report_illegal_transition("humongous continuation allocation");
    }
  }
  
- void ShenandoahHeapRegion::make_humongous_cont_bypass() {
+ void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahRegionAffiliation affiliation) {
    shenandoah_assert_heaplocked();
    assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
- 
+   set_affiliation(affiliation);
+   reset_age();
    switch (_state) {
      case _empty_committed:
      case _regular:
      case _humongous_start:
      case _humongous_cont:

@@ -209,10 +229,11 @@
    shenandoah_assert_heaplocked();
    assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());
  
    switch (_state) {
      case _pinned:
+       assert(affiliation() != FREE, "Pinned region should not be FREE");
        set_state(_regular);
        return;
      case _regular:
      case _humongous_start:
        return;

@@ -227,10 +248,11 @@
    }
  }
  
  void ShenandoahHeapRegion::make_cset() {
    shenandoah_assert_heaplocked();
+   reset_age();
    switch (_state) {
      case _regular:
        set_state(_cset);
      case _cset:
        return;

@@ -239,10 +261,11 @@
    }
  }
  
  void ShenandoahHeapRegion::make_trash() {
    shenandoah_assert_heaplocked();
+   reset_age();
    switch (_state) {
      case _cset:
        // Reclaiming cset regions
      case _humongous_start:
      case _humongous_cont:

@@ -259,15 +282,19 @@
  void ShenandoahHeapRegion::make_trash_immediate() {
    make_trash();
  
    // On this path, we know there are no marked objects in the region,
    // tell marking context about it to bypass bitmap resets.
-   ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this);
+   assert(ShenandoahHeap::heap()->active_generation()->is_mark_complete(), "Marking should be complete here.");
+   // Leave top_bitmap alone.  If it is greater than bottom(), then we still need to clear between bottom() and top_bitmap()
+   // when this FREE region is repurposed for YOUNG or OLD.
+   // ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
  }
  
  void ShenandoahHeapRegion::make_empty() {
    shenandoah_assert_heaplocked();
+   reset_age();
    switch (_state) {
      case _trash:
        set_state(_empty_committed);
        _empty_time = os::elapsedTime();
        return;

@@ -303,24 +330,29 @@
  }
  
  void ShenandoahHeapRegion::reset_alloc_metadata() {
    _tlab_allocs = 0;
    _gclab_allocs = 0;
+   _plab_allocs = 0;
  }
  
  size_t ShenandoahHeapRegion::get_shared_allocs() const {
-   return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize;
+   return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
  }
  
  size_t ShenandoahHeapRegion::get_tlab_allocs() const {
    return _tlab_allocs * HeapWordSize;
  }
  
  size_t ShenandoahHeapRegion::get_gclab_allocs() const {
    return _gclab_allocs * HeapWordSize;
  }
  
+ size_t ShenandoahHeapRegion::get_plab_allocs() const {
+   return _plab_allocs * HeapWordSize;
+ }
+ 
  void ShenandoahHeapRegion::set_live_data(size_t s) {
    assert(Thread::current()->is_VM_thread(), "by VM thread");
    _live_data = (s >> LogHeapWordSize);
  }
  

@@ -360,39 +392,137 @@
        st->print("|CSP");
        break;
      default:
        ShouldNotReachHere();
    }
+   switch (_affiliation) {
+     case ShenandoahRegionAffiliation::FREE:
+       st->print("|F");
+       break;
+     case ShenandoahRegionAffiliation::YOUNG_GENERATION:
+       st->print("|Y");
+       break;
+     case ShenandoahRegionAffiliation::OLD_GENERATION:
+       st->print("|O");
+       break;
+     default:
+       ShouldNotReachHere();
+   }
    st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
              p2i(bottom()), p2i(top()), p2i(end()));
    st->print("|TAMS " INTPTR_FORMAT_W(12),
              p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
    st->print("|UWM " INTPTR_FORMAT_W(12),
              p2i(_update_watermark));
    st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
    st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
    st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
+   if (ShenandoahHeap::heap()->mode()->is_generational()) {
+     st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()),   proper_unit_for_byte_size(get_plab_allocs()));
+   }
    st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
    st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
    st->print("|CP " SIZE_FORMAT_W(3), pin_count());
    st->cr();
  }
  
- void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) {
+ // Like oop_iterate, but without a closure: fill dead objects and coalesce them with their dead neighbors.
+ // Returns true if it completes without being cancelled.
+ bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
+   HeapWord* obj_addr = resume_coalesce_and_fill();
+   // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
+   const size_t preemption_stride = 128;
+ 
+   assert(!is_humongous(), "No need to fill or coalesce humongous regions");
+   if (!is_active()) {
+     end_preemptible_coalesce_and_fill();
+     return true;
+   }
+ 
+   ShenandoahHeap* heap = ShenandoahHeap::heap();
+   ShenandoahMarkingContext* marking_context = heap->marking_context();
+   // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
+   // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
+   // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
+   // and will be treated as live during the current old-gen marking pass, even though they will not be
+   // explicitly marked.
+   HeapWord* t = marking_context->top_at_mark_start(this);
+ 
+   // Expect marking to be completed before these threads invoke this service.
+   assert(heap->active_generation()->is_mark_complete(), "sanity");
+ 
+   size_t ops_before_preempt_check = preemption_stride;
+   while (obj_addr < t) {
+     oop obj = cast_to_oop(obj_addr);
+     if (marking_context->is_marked(obj)) {
+       assert(obj->klass() != NULL, "klass should not be NULL");
+       obj_addr += obj->size();
+     } else {
+       // Object is not marked.  Coalesce and fill dead object with dead neighbors.
+       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
+       assert(next_marked_obj <= t, "next marked object cannot exceed top");
+       size_t fill_size = next_marked_obj - obj_addr;
+       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
+       heap->card_scan()->coalesce_objects(obj_addr, fill_size);
+       obj_addr = next_marked_obj;
+     }
+     if (ops_before_preempt_check-- == 0) {
+       if (heap->cancelled_gc()) {
+         suspend_coalesce_and_fill(obj_addr);
+         return false;
+       }
+       ops_before_preempt_check = preemption_stride;
+     }
+   }
+   // Mark that this region has been coalesced and filled
+   end_preemptible_coalesce_and_fill();
+   return true;
+ }
+ 
+ void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) {
    if (!is_active()) return;
    if (is_humongous()) {
+     // No need to fill dead within humongous regions.  Either the entire region is dead, or the entire region is
+     // unchanged.  A humongous region holds no more than one humongous object.
      oop_iterate_humongous(blk);
    } else {
-     oop_iterate_objects(blk);
+     global_oop_iterate_objects_and_fill_dead(blk);
    }
  }
  
- void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) {
-   assert(! is_humongous(), "no humongous region here");
+ void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) {
+   assert(!is_humongous(), "no humongous region here");
    HeapWord* obj_addr = bottom();
-   HeapWord* t = top();
-   // Could call objects iterate, but this is easier.
+ 
+   ShenandoahHeap* heap = ShenandoahHeap::heap();
+   ShenandoahMarkingContext* marking_context = heap->marking_context();
+   RememberedScanner* rem_set_scanner = heap->card_scan();
+   // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts.
+   HeapWord* t = marking_context->top_at_mark_start(this);
+ 
+   assert(heap->active_generation()->is_mark_complete(), "sanity");
+ 
+   while (obj_addr < t) {
+     oop obj = cast_to_oop(obj_addr);
+     if (marking_context->is_marked(obj)) {
+       assert(obj->klass() != NULL, "klass should not be NULL");
+       // when promoting an entire region, we have to register the marked objects as well
+       obj_addr += obj->oop_iterate_size(blk);
+     } else {
+       // Object is not marked.  Coalesce and fill dead object with dead neighbors.
+       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
+       assert(next_marked_obj <= t, "next marked object cannot exceed top");
+       size_t fill_size = next_marked_obj - obj_addr;
+       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
+ 
+       // coalesce_objects() unregisters all but the first object subsumed within the coalesced range.
+       rem_set_scanner->coalesce_objects(obj_addr, fill_size);
+       obj_addr = next_marked_obj;
+     }
+   }
+ 
+   // Any object above TAMS and below top() is considered live.
+   t = top();
    while (obj_addr < t) {
      oop obj = cast_to_oop(obj_addr);
      obj_addr += obj->oop_iterate_size(blk);
    }
  }

@@ -420,19 +550,30 @@
    assert(r->is_humongous_start(), "Must be");
    return r;
  }
  
  void ShenandoahHeapRegion::recycle() {
+   ShenandoahHeap* heap = ShenandoahHeap::heap();
+ 
+   if (affiliation() == YOUNG_GENERATION) {
+     heap->young_generation()->decrease_used(used());
+   } else if (affiliation() == OLD_GENERATION) {
+     heap->old_generation()->decrease_used(used());
+   }
+ 
    set_top(bottom());
    clear_live_data();
  
    reset_alloc_metadata();
  
-   ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this);
+   heap->marking_context()->reset_top_at_mark_start(this);
    set_update_watermark(bottom());
  
    make_empty();
+   set_affiliation(FREE);
+ 
+   heap->clear_cards_for(this);
  
    if (ZapUnusedHeapArea) {
      SpaceMangler::mangle_region(MemRegion(bottom(), end()));
    }
  }

@@ -679,5 +820,115 @@
  }
  
  size_t ShenandoahHeapRegion::pin_count() const {
    return Atomic::load(&_critical_pins);
  }
+ 
+ void ShenandoahHeapRegion::set_affiliation(ShenandoahRegionAffiliation new_affiliation) {
+   ShenandoahHeap* heap = ShenandoahHeap::heap();
+ 
+   {
+     ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
+     log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
+                   ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT "\n",
+                   index(), affiliation_name(_affiliation), affiliation_name(new_affiliation),
+                   p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
+   }
+ 
+ #ifdef ASSERT
+   {
+     // During full gc, heap->complete_marking_context() is not valid; it may be nullptr.
+     ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
+     size_t idx = this->index();
+     HeapWord* top_bitmap = ctx->top_bitmap(this);
+ 
+     assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
+            "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
+            p2i(top_bitmap), p2i(_end));
+   }
+ #endif
+ 
+   if (_affiliation == new_affiliation) {
+     return;
+   }
+ 
+   if (!heap->mode()->is_generational()) {
+     _affiliation = new_affiliation;
+     return;
+   }
+ 
+   log_trace(gc)("Changing affiliation of region %zu from %s to %s",
+     index(), affiliation_name(_affiliation), affiliation_name(new_affiliation));
+ 
+   if (_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
+     heap->young_generation()->decrement_affiliated_region_count();
+   } else if (_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
+     heap->old_generation()->decrement_affiliated_region_count();
+   }
+ 
+   switch (new_affiliation) {
+     case FREE:
+       assert(!has_live(), "Free region should not have live data");
+       break;
+     case YOUNG_GENERATION:
+       reset_age();
+       heap->young_generation()->increment_affiliated_region_count();
+       break;
+     case OLD_GENERATION:
+       heap->old_generation()->increment_affiliated_region_count();
+       break;
+     default:
+       ShouldNotReachHere();
+       return;
+   }
+   _affiliation = new_affiliation;
+ }
+ 
+ size_t ShenandoahHeapRegion::promote_humongous() {
+   ShenandoahHeap* heap = ShenandoahHeap::heap();
+   ShenandoahMarkingContext* marking_context = heap->marking_context();
+   assert(heap->active_generation()->is_mark_complete(), "sanity");
+   assert(is_young(), "Only young regions can be promoted");
+   assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
+   assert(age() >= InitialTenuringThreshold, "Only promote regions that are sufficiently aged");
+ 
+   ShenandoahGeneration* old_generation = heap->old_generation();
+   ShenandoahGeneration* young_generation = heap->young_generation();
+ 
+   oop obj = cast_to_oop(bottom());
+   assert(marking_context->is_marked(obj), "promoted humongous object should be alive");
+ 
+   size_t spanned_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize);
+   size_t index_limit = index() + spanned_regions;
+ 
+   log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);
+ 
+   // Since this region may have served previously as OLD, it may hold obsolete object range info.
+   heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words());
+   // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
+   heap->card_scan()->register_object_wo_lock(bottom());
+ 
+   // For this region and each humongous continuation region spanned by this humongous object, change
+   // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
+   // in the last humongous region that is not spanned by obj is currently not used.
+   for (size_t i = index(); i < index_limit; i++) {
+     ShenandoahHeapRegion* r = heap->get_region(i);
+     log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
+                   r->index(), p2i(r->bottom()), p2i(r->top()));
+     // We mark the entire humongous object's range as dirty (or clean, for primitive arrays) after the loop terminates, so there is no need to dirty the range here
+     r->set_affiliation(OLD_GENERATION);
+     old_generation->increase_used(r->used());
+     young_generation->decrease_used(r->used());
+   }
+   if (obj->is_typeArray()) {
+     // Primitive arrays don't need to be scanned.  See above TODO question about requiring
+     // region promotion at safepoint.
+     log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
+                   index(), p2i(bottom()), p2i(bottom() + obj->size()));
+     heap->card_scan()->mark_range_as_clean(bottom(), obj->size());
+   } else {
+     log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
+                   index(), p2i(bottom()), p2i(bottom() + obj->size()));
+     heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
+   }
+   return index_limit - index();
+ }