src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
+ #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
+ #include "gc/shenandoah/shenandoahEvacTracker.hpp"
+ #include "gc/shenandoah/shenandoahMmuTracker.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "gc/shenandoah/shenandoahUnload.hpp"
+ #include "gc/shenandoah/shenandoahScanRemembered.hpp"
#include "memory/metaspace.hpp"
#include "services/memoryManager.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.hpp"
class ConcurrentGCTimer;
class ObjectIterateScanRootClosure;
+ class PLAB;
class ShenandoahCollectorPolicy;
class ShenandoahControlThread;
+ class ShenandoahRegulatorThread;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
+ class ShenandoahGeneration;
+ class ShenandoahYoungGeneration;
+ class ShenandoahOldGeneration;
class ShenandoahHeuristics;
+ class ShenandoahOldHeuristics;
class ShenandoahMarkingContext;
- class ShenandoahMode;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
public:
virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
virtual bool is_thread_safe() { return false; }
};
+ template<GenerationMode GENERATION>
+ class ShenandoahGenerationRegionClosure : public ShenandoahHeapRegionClosure {
+ public:
+ explicit ShenandoahGenerationRegionClosure(ShenandoahHeapRegionClosure* cl) : _cl(cl) {}
+ void heap_region_do(ShenandoahHeapRegion* r);
+ virtual bool is_thread_safe() { return _cl->is_thread_safe(); }
+ private:
+ ShenandoahHeapRegionClosure* _cl;
+ };
+
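// A plausible specialization of the filtering closure above, shown as an
// illustrative sketch (the real definitions live outside this header, and the
// exact affiliation test is an assumption):
//
//   template<>
//   void ShenandoahGenerationRegionClosure<YOUNG>::heap_region_do(ShenandoahHeapRegion* r) {
//     // Delegate only for regions affiliated with the young generation.
//     if (r->is_young()) {
//       _cl->heap_region_do(r);
//     }
//   }
//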
typedef ShenandoahLock ShenandoahHeapLock;
typedef ShenandoahLocker ShenandoahHeapLocker;
typedef Stack<oop, mtGC> ShenandoahScanObjectStack;
// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
friend class ShenandoahGCStateResetter;
friend class ShenandoahParallelObjectIterator;
friend class ShenandoahSafepoint;
// Supported GC
friend class ShenandoahConcurrentGC;
+ friend class ShenandoahOldGC;
friend class ShenandoahDegenGC;
friend class ShenandoahFullGC;
friend class ShenandoahUnload;
// ---------- Locks that guard important data structures in Heap
//
private:
ShenandoahHeapLock _lock;
+ ShenandoahGeneration* _gc_generation;
+
+ // true iff we are concurrently coalescing and filling old-gen HeapRegions
+ bool _prepare_for_old_mark;
public:
ShenandoahHeapLock* lock() {
return &_lock;
}
+ ShenandoahGeneration* active_generation() const {
+ // last or latest generation might be a better name here.
+ return _gc_generation;
+ }
+
+ void set_gc_generation(ShenandoahGeneration* generation) {
+ _gc_generation = generation;
+ }
+
+ ShenandoahOldHeuristics* old_heuristics();
+
+ bool doing_mixed_evacuations();
+ bool is_old_bitmap_stable() const;
+ bool is_gc_generation_young() const;
+
// ---------- Initialization, termination, identification, printing routines
//
public:
static ShenandoahHeap* heap();
ShenandoahHeap(ShenandoahCollectorPolicy* policy);
jint initialize() override;
void post_initialize() override;
void initialize_mode();
void initialize_heuristics();
+ void initialize_generations();
void initialize_serviceability() override;
void print_on(outputStream* st) const override;
void print_extended_on(outputStream *st) const override;
void stop() override;
void prepare_for_verify() override;
void verify(VerifyOption vo) override;
+ void verify_rem_set_at_mark();
+ void verify_rem_set_at_update_ref();
+ void verify_rem_set_after_full_gc();
// WhiteBox testing support.
bool supports_concurrent_gc_breakpoints() const override {
return true;
}
size_t _minimum_size;
volatile size_t _soft_max_size;
shenandoah_padding(0);
volatile size_t _used;
volatile size_t _committed;
- volatile size_t _bytes_allocated_since_gc_start;
shenandoah_padding(1);
+ void help_verify_region_rem_set(ShenandoahHeapRegion* r, ShenandoahMarkingContext* ctx,
+ HeapWord* from, HeapWord* top, HeapWord* update_watermark, const char* message);
+
public:
void increase_used(size_t bytes);
void decrease_used(size_t bytes);
void set_used(size_t bytes);
void increase_committed(size_t bytes);
void decrease_committed(size_t bytes);
- void increase_allocated(size_t bytes);
- size_t bytes_allocated_since_gc_start();
void reset_bytes_allocated_since_gc_start();
size_t min_capacity() const;
size_t max_capacity() const override;
size_t soft_max_capacity() const;
private:
MemRegion _heap_region;
bool _heap_region_special;
size_t _num_regions;
ShenandoahHeapRegion** _regions;
+ uint8_t* _affiliations; // Holds array of enum ShenandoahRegionAffiliation, including FREE status in non-generational mode
ShenandoahRegionIterator _update_refs_iterator;
public:
inline HeapWord* base() const { return _heap_region.start(); }
public:
enum GCStateBitPos {
// Heap has forwarded objects: needs LRB barriers.
HAS_FORWARDED_BITPOS = 0,
- // Heap is under marking: needs SATB barriers.
- MARKING_BITPOS = 1,
+ // Young regions are under marking: needs SATB barriers.
+ YOUNG_MARKING_BITPOS = 1,
// Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
EVACUATION_BITPOS = 2,
// Heap is under updating: needs no additional barriers.
UPDATEREFS_BITPOS = 3,
// Heap is under weak-reference/roots processing: needs weak-LRB barriers.
WEAK_ROOTS_BITPOS = 4,
+
+ // Old regions are under marking, still need SATB barriers.
+ OLD_MARKING_BITPOS = 5
};
enum GCState {
STABLE = 0,
HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
- MARKING = 1 << MARKING_BITPOS,
+ YOUNG_MARKING = 1 << YOUNG_MARKING_BITPOS,
EVACUATION = 1 << EVACUATION_BITPOS,
UPDATEREFS = 1 << UPDATEREFS_BITPOS,
WEAK_ROOTS = 1 << WEAK_ROOTS_BITPOS,
+ OLD_MARKING = 1 << OLD_MARKING_BITPOS
};
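// In generational mode a cycle can have young marking, old marking, or both
// active at once; a combined "any marking" test is then a mask check over the
// composite state (illustrative sketch, not the actual accessor):
//
//   bool any_marking = (gc_state() & (YOUNG_MARKING | OLD_MARKING)) != 0;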
private:
ShenandoahSharedBitmap _gc_state;
ShenandoahSharedFlag _degenerated_gc_in_progress;
ShenandoahSharedFlag _full_gc_in_progress;
ShenandoahSharedFlag _full_gc_move_in_progress;
ShenandoahSharedFlag _progress_last_gc;
ShenandoahSharedFlag _concurrent_strong_root_in_progress;
+ // _alloc_supplement_reserve is a supplemental budget for new memory allocations. During evacuation and update-references,
+ // mutator allocation requests are "authorized" iff young_gen->available() plus _alloc_supplement_reserve minus
+ // _young_evac_reserve is greater than request size. The values of _alloc_supplement_reserve and _young_evac_reserve
+ // are zero except during evacuation and update-reference phases of GC. Both of these values are established at
+ // the start of evacuation, and they remain constant throughout the duration of these two phases of GC. Since these
+ // two values are constant throughout each GC phase, we introduce a new service into ShenandoahGeneration. This service
+ // provides adjusted_available() based on an adjusted capacity. At the start of evacuation, we adjust young capacity by
+ // adding the amount to be borrowed from old-gen and subtracting the _young_evac_reserve; we adjust old capacity by
+ // subtracting the amount to be loaned to young-gen.
+ //
+ // We always use adjusted capacities to determine permission to allocate within young and to promote into old. Note
+ // that adjusted capacities equal traditional capacities except during evacuation and update refs.
+ //
+ // During evacuation, we ensure that _old_evac_expended does not exceed _old_evac_reserve.
+ //
+ // At the end of update references, we perform the following bookkeeping activities:
+ //
+ // 1. Unadjust the capacity within young-gen and old-gen to undo the effects of borrowing memory from old-gen. Note that
+ // the entirety of the collection set is now available, so allocation capacity naturally increases at this time.
+ // 2. Clear (reset to zero) _alloc_supplement_reserve, _young_evac_reserve, _old_evac_reserve, and _promoted_reserve.
+ //
+ // _young_evac_reserve and _old_evac_reserve are only non-zero during evacuation and update-references.
+ //
+ // Allocation of old GCLABs ensures that _old_evac_expended + request-size < _old_evac_reserve. If the allocation
+ // is authorized, increment _old_evac_expended by request size. This allocation ignores old_gen->available().
+ //
+ // Note that the typical total expenditure on evacuation is less than the associated evacuation reserve because we generally
+ // reserve ShenandoahEvacWaste (> 1.0) times the anticipated evacuation need. In the case that there is an excessive amount
+ // of waste, one thread may fail to grab a new GCLAB; this does not necessarily doom the associated evacuation
+ // effort. If this happens, the requesting thread blocks until some other thread manages to evacuate the offending object.
+ // Only after "all" threads fail to evacuate an object do we consider the evacuation effort to have failed.
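// The reserve/expended handshake described above can be modeled with a
// self-contained sketch, using std::atomic in place of HotSpot's Atomic::
// wrappers (illustrative only; the real fields and accessors follow below):
//
//   #include <atomic>
//   #include <cstddef>
//
//   struct EvacBudget {
//     size_t reserve = 0;                // fixed at the start of evacuation
//     std::atomic<size_t> expended{0};   // bumped by each LAB request
//
//     // Authorize a request iff it still fits within the reserve.
//     bool try_expend(size_t request) {
//       size_t cur = expended.load();
//       while (cur + request <= reserve) {
//         if (expended.compare_exchange_weak(cur, cur + request)) {
//           return true;                 // authorized; expenditure recorded
//         }
//       }
//       return false;                    // reserve exhausted; caller handles failure
//     }
//   };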
+
+ intptr_t _alloc_supplement_reserve; // Bytes reserved for young allocations during evac and update refs
+ size_t _promoted_reserve; // Bytes reserved within old-gen to hold the results of promotion
+ volatile size_t _promoted_expended; // Bytes of old-gen memory expended on promotions
+
+ size_t _old_evac_reserve; // Bytes reserved within old-gen to hold evacuated objects from old-gen collection set
+ volatile size_t _old_evac_expended; // Bytes of old-gen memory expended on old-gen evacuations
+
+ size_t _young_evac_reserve; // Bytes reserved within young-gen to hold evacuated objects from young-gen collection set
+
+ size_t _captured_old_usage; // What was old usage (bytes) when last captured?
+
+ size_t _previous_promotion; // Bytes promoted during previous evacuation
+
+ bool _upgraded_to_full;
+
void set_gc_state_all_threads(char state);
void set_gc_state_mask(uint mask, bool value);
+
public:
char gc_state() const;
static address gc_state_addr();
- void set_concurrent_mark_in_progress(bool in_progress);
+ void set_concurrent_young_mark_in_progress(bool in_progress);
+ void set_concurrent_old_mark_in_progress(bool in_progress);
void set_evacuation_in_progress(bool in_progress);
void set_update_refs_in_progress(bool in_progress);
void set_degenerated_gc_in_progress(bool in_progress);
void set_full_gc_in_progress(bool in_progress);
void set_full_gc_move_in_progress(bool in_progress);
void set_has_forwarded_objects(bool cond);
void set_concurrent_strong_root_in_progress(bool cond);
void set_concurrent_weak_root_in_progress(bool cond);
+ void set_prepare_for_old_mark_in_progress(bool cond);
+ void set_aging_cycle(bool cond);
inline bool is_stable() const;
inline bool is_idle() const;
inline bool is_concurrent_mark_in_progress() const;
+ inline bool is_concurrent_young_mark_in_progress() const;
+ inline bool is_concurrent_old_mark_in_progress() const;
inline bool is_update_refs_in_progress() const;
inline bool is_evacuation_in_progress() const;
inline bool is_degenerated_gc_in_progress() const;
inline bool is_full_gc_in_progress() const;
inline bool is_full_gc_move_in_progress() const;
inline bool has_forwarded_objects() const;
inline bool is_gc_in_progress_mask(uint mask) const;
inline bool is_stw_gc_in_progress() const;
inline bool is_concurrent_strong_root_in_progress() const;
inline bool is_concurrent_weak_root_in_progress() const;
+ inline bool is_prepare_for_old_mark_in_progress() const;
+ inline bool is_aging_cycle() const;
+ inline bool upgraded_to_full() { return _upgraded_to_full; }
+ inline void start_conc_gc() { _upgraded_to_full = false; }
+ inline void record_upgrade_to_full() { _upgraded_to_full = true; }
+
+ inline size_t capture_old_usage(size_t usage);
+ inline void set_previous_promotion(size_t promoted_bytes);
+ inline size_t get_previous_promotion() const;
+
+ // Returns previous value
+ inline size_t set_promoted_reserve(size_t new_val);
+ inline size_t get_promoted_reserve() const;
+
+ inline void reset_promoted_expended();
+ inline size_t expend_promoted(size_t increment);
+ inline size_t unexpend_promoted(size_t decrement);
+ inline size_t get_promoted_expended();
+
+ // Returns previous value
+ inline size_t set_old_evac_reserve(size_t new_val);
+ inline size_t get_old_evac_reserve() const;
+
+ inline void reset_old_evac_expended();
+ inline size_t expend_old_evac(size_t increment);
+ inline size_t get_old_evac_expended();
+
+ // Returns previous value
+ inline size_t set_young_evac_reserve(size_t new_val);
+ inline size_t get_young_evac_reserve() const;
+
+ // Returns previous value. This is a signed value because it is the amount borrowed minus the amount reserved for
+ // young-gen evacuation. In case we cannot borrow much, this value might be negative.
+ inline intptr_t set_alloc_supplement_reserve(intptr_t new_val);
+ inline intptr_t get_alloc_supplement_reserve() const;
private:
+ void manage_satb_barrier(bool active);
+
enum CancelState {
// Normal state. GC has not been cancelled and is open for cancellation.
// Worker threads can suspend for safepoint.
CANCELLABLE,
// one worker thread checks for pending safepoint and may suspend
// if a safepoint is pending.
NOT_CANCELLED
};
+ double _cancel_requested_time;
ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
+
+ // Returns true if cancel request was successfully communicated.
+ // Returns false if some other thread already communicated cancel
+ // request. A true return value does not mean GC has been
+ // cancelled, only that the process of cancelling GC has begun.
bool try_cancel_gc();
public:
- static address cancelled_gc_addr();
-
inline bool cancelled_gc() const;
inline bool check_cancelled_gc_and_yield(bool sts_active = true);
- inline void clear_cancelled_gc();
+ inline void clear_cancelled_gc(bool clear_oom_handler = true);
+ void cancel_concurrent_mark();
void cancel_gc(GCCause::Cause cause);
public:
// Elastic heap support
void entry_uncommit(double shrink_before, size_t shrink_until);
void op_uncommit(double shrink_before, size_t shrink_until);
private:
// GC support
- // Reset bitmap, prepare regions for new GC cycle
- void prepare_gc();
- void prepare_regions_and_collection_set(bool concurrent);
// Evacuation
- void prepare_evacuation(bool concurrent);
void evacuate_collection_set(bool concurrent);
// Concurrent root processing
void prepare_concurrent_roots();
void finish_concurrent_roots();
// Concurrent class unloading support
void notify_gc_no_progress() { _progress_last_gc.unset(); }
//
// Mark support
private:
+ ShenandoahYoungGeneration* _young_generation;
+ ShenandoahGeneration* _global_generation;
+ ShenandoahOldGeneration* _old_generation;
+
ShenandoahControlThread* _control_thread;
+ ShenandoahRegulatorThread* _regulator_thread;
ShenandoahCollectorPolicy* _shenandoah_policy;
ShenandoahMode* _gc_mode;
- ShenandoahHeuristics* _heuristics;
ShenandoahFreeSet* _free_set;
ShenandoahPacer* _pacer;
ShenandoahVerifier* _verifier;
- ShenandoahPhaseTimings* _phase_timings;
+ ShenandoahPhaseTimings* _phase_timings;
+ ShenandoahEvacuationTracker* _evac_tracker;
+ ShenandoahMmuTracker _mmu_tracker;
+ ShenandoahGenerationSizer _generation_sizer;
- ShenandoahControlThread* control_thread() { return _control_thread; }
+ ShenandoahRegulatorThread* regulator_thread() { return _regulator_thread; }
public:
+ ShenandoahControlThread* control_thread() { return _control_thread; }
+ ShenandoahYoungGeneration* young_generation() const { return _young_generation; }
+ ShenandoahGeneration* global_generation() const { return _global_generation; }
+ ShenandoahOldGeneration* old_generation() const { return _old_generation; }
+ ShenandoahGeneration* generation_for(ShenandoahRegionAffiliation affiliation) const;
+ const ShenandoahGenerationSizer* generation_sizer() const { return &_generation_sizer; }
+
+ size_t max_size_for(ShenandoahGeneration* generation) const;
+ size_t min_size_for(ShenandoahGeneration* generation) const;
+
ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
ShenandoahMode* mode() const { return _gc_mode; }
- ShenandoahHeuristics* heuristics() const { return _heuristics; }
ShenandoahFreeSet* free_set() const { return _free_set; }
ShenandoahPacer* pacer() const { return _pacer; }
- ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }
+ ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }
+ ShenandoahEvacuationTracker* evac_tracker() const { return _evac_tracker; }
+
+ void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation);
+ void on_cycle_end(ShenandoahGeneration* generation);
ShenandoahVerifier* verifier();
// ---------- VM subsystem bindings
//
private:
ShenandoahMonitoringSupport* _monitoring_support;
MemoryPool* _memory_pool;
+ MemoryPool* _young_gen_memory_pool;
+ MemoryPool* _old_gen_memory_pool;
+
GCMemoryManager _stw_memory_manager;
GCMemoryManager _cycle_memory_manager;
ConcurrentGCTimer* _gc_timer;
SoftRefPolicy _soft_ref_policy;
// For exporting to SA
int _log_min_obj_alignment_in_bytes;
public:
- ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; }
+ ShenandoahMonitoringSupport* monitoring_support() const { return _monitoring_support; }
GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; }
GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; }
SoftRefPolicy* soft_ref_policy() override { return &_soft_ref_policy; }
GrowableArray<GCMemoryManager*> memory_managers() override;
GrowableArray<MemoryPool*> memory_pools() override;
MemoryUsage memory_usage() override;
GCTracer* tracer();
ConcurrentGCTimer* gc_timer() const;
- // ---------- Reference processing
- //
- private:
- ShenandoahReferenceProcessor* const _ref_processor;
-
- public:
- ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; }
-
// ---------- Class Unloading
//
private:
+ ShenandoahSharedFlag _is_aging_cycle;
ShenandoahSharedFlag _unload_classes;
ShenandoahUnload _unloader;
public:
void set_unload_classes(bool uc);
private:
void stw_unload_classes(bool full_gc);
void stw_process_weak_roots(bool full_gc);
void stw_weak_refs(bool full_gc);
+ inline void assert_lock_for_affiliation(ShenandoahRegionAffiliation orig_affiliation,
+ ShenandoahRegionAffiliation new_affiliation);
+
// Heap iteration support
void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
bool prepare_aux_bitmap_for_iteration();
void reclaim_aux_bitmap_for_iteration();
// and can be stubbed out.
//
public:
bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false);
- bool is_in(const void* p) const override;
+ inline bool is_in(const void* p) const override;
+
+ inline bool is_in_active_generation(oop obj) const;
+ inline bool is_in_young(const void* p) const;
+ inline bool is_in_old(const void* p) const;
+ inline bool is_old(oop pobj) const;
+
+ inline ShenandoahRegionAffiliation region_affiliation(const ShenandoahHeapRegion* r);
+ inline void set_affiliation(ShenandoahHeapRegion* r, ShenandoahRegionAffiliation new_affiliation);
+
+ inline ShenandoahRegionAffiliation region_affiliation(size_t index);
+ inline void set_affiliation(size_t index, ShenandoahRegionAffiliation new_affiliation);
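// Since _affiliations (declared earlier) holds one byte per region, the
// index-based accessor is plausibly a direct array read, sketched here
// (illustrative; the real definition lives in the .inline.hpp):
//
//   inline ShenandoahRegionAffiliation ShenandoahHeap::region_affiliation(size_t index) {
//     return (ShenandoahRegionAffiliation) _affiliations[index];
//   }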
bool requires_barriers(stackChunkOop obj) const override;
MemRegion reserved_region() const { return _reserved; }
bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
bool uses_stack_watermark_barrier() const override { return true; }
// ---------- Allocation support
//
private:
- HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
+ HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region, bool is_promotion);
+
inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);
+ inline HeapWord* allocate_from_plab(Thread* thread, size_t size, bool is_promotion);
+ HeapWord* allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion);
+ HeapWord* allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size);
+
public:
- HeapWord* allocate_memory(ShenandoahAllocRequest& request);
+ HeapWord* allocate_memory(ShenandoahAllocRequest& request, bool is_promotion);
HeapWord* mem_allocate(size_t size, bool* what) override;
MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
size_t size,
Metaspace::MetadataType mdtype) override;
void labs_make_parsable();
void tlabs_retire(bool resize);
void gclabs_retire(bool resize);
+ void set_young_lab_region_flags();
+
// ---------- Marking support
//
private:
ShenandoahMarkingContext* _marking_context;
MemRegion _bitmap_region;
ShenandoahLiveData** _liveness_cache;
public:
inline ShenandoahMarkingContext* complete_marking_context() const;
inline ShenandoahMarkingContext* marking_context() const;
- inline void mark_complete_marking_context();
- inline void mark_incomplete_marking_context();
template<class T>
inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
template<class T>
inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
template<class T>
inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
- void reset_mark_bitmap();
-
// SATB barriers hooks
inline bool requires_marking(const void* entry) const;
// Support for bitmap uncommits
bool commit_bitmap_slice(ShenandoahHeapRegion *r);
// ---------- Evacuation support
//
private:
ShenandoahCollectionSet* _collection_set;
ShenandoahEvacOOMHandler _oom_evac_handler;
+ ShenandoahSharedFlag _old_gen_oom_evac;
+
+ inline oop try_evacuate_object(oop src, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahRegionAffiliation target_gen);
+ void handle_old_evacuation(HeapWord* obj, size_t words, bool promotion);
+ void handle_old_evacuation_failure();
public:
+ void handle_promotion_failure();
+ void report_promotion_failure(Thread* thread, size_t size);
+
static address in_cset_fast_test_addr();
ShenandoahCollectionSet* collection_set() const { return _collection_set; }
// Checks if object is in the collection set.
inline bool in_collection_set(oop obj) const;
// Checks if location is in the collection set. Can be interior pointer, not the oop itself.
inline bool in_collection_set_loc(void* loc) const;
- // Evacuates object src. Returns the evacuated object, either evacuated
+ // Evacuates or promotes object src. Returns the evacuated object, either evacuated
// by this thread, or by some other thread.
inline oop evacuate_object(oop src, Thread* thread);
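// A hedged sketch of the evacuate-or-promote decision implied by
// try_evacuate_object's target_gen parameter; the tenuring test shown here
// is an assumption, not the actual policy:
//
//   oop ShenandoahHeap::evacuate_object(oop src, Thread* thread) {
//     ShenandoahHeapRegion* r = heap_region_containing(src);
//     // Old objects are evacuated within old; sufficiently aged young
//     // objects are promoted, the rest stay in young.
//     ShenandoahRegionAffiliation target =
//         (r->is_old() || get_object_age(src) >= InitialTenuringThreshold)
//             ? OLD_GENERATION : YOUNG_GENERATION;
//     return try_evacuate_object(src, thread, r, target);
//   }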
// Call before/after evacuation.
inline void enter_evacuation(Thread* t);
inline void leave_evacuation(Thread* t);
+ inline bool clear_old_evacuation_failure();
+
+ // ---------- Generational support
+ //
+ private:
+ RememberedScanner* _card_scan;
+
+ public:
+ inline RememberedScanner* card_scan() { return _card_scan; }
+ void clear_cards_for(ShenandoahHeapRegion* region);
+ void dirty_cards(HeapWord* start, HeapWord* end);
+ void clear_cards(HeapWord* start, HeapWord* end);
+ void mark_card_as_dirty(void* location);
+ void retire_plab(PLAB* plab);
+ void retire_plab(PLAB* plab, Thread* thread);
+ void cancel_old_gc();
+ bool is_old_gc_active();
+ void coalesce_and_fill_old_regions();
+ bool adjust_generation_sizes();
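// The card-marking entry points above follow conventional card-table math:
// each card covers a power-of-two slice of the heap, so dirtying the card for
// a location is a shift plus a byte store. Sketch, assuming 512-byte cards as
// in HotSpot's classic card table (heap_base, card_table, and DIRTY_CARD are
// illustrative names, not this header's API):
//
//   void mark_card_as_dirty(void* location) {
//     size_t index = ((uintptr_t)location - (uintptr_t)heap_base) >> 9;  // log2(512)
//     card_table[index] = DIRTY_CARD;
//   }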
+
// ---------- Helper functions
//
public:
template <class T>
inline void conc_update_with_forwarded(T* p);
static inline void atomic_clear_oop( oop* addr, oop compare);
static inline void atomic_clear_oop(narrowOop* addr, oop compare);
static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);
- void trash_humongous_region_at(ShenandoahHeapRegion *r);
+ size_t trash_humongous_region_at(ShenandoahHeapRegion *r);
+
+ static inline void increase_object_age(oop obj, uint additional_age);
+ static inline uint get_object_age(oop obj);
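// Object age is plausibly tracked in the mark-word age bits, as in other
// HotSpot collectors. A sketch (assumes markWord's age accessors and ignores
// displaced/forwarded headers for brevity):
//
//   inline uint ShenandoahHeap::get_object_age(oop obj) {
//     return obj->mark().age();
//   }
//   inline void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
//     markWord w = obj->mark();
//     obj->set_mark(w.set_age(MIN2(w.age() + additional_age, (uint)markWord::max_age)));
//   }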
+
+ void transfer_old_pointers_from_satb();
+
+ void log_heap_status(const char *msg) const;
private:
void trash_cset_regions();
// ---------- Testing helpers functions