< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp

Print this page
@@ -1,8 +1,9 @@
  /*
   * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
+  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.

@@ -24,44 +25,58 @@
   */
  
  #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
  #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
  
+ #include "gc/shared/ageTable.hpp"
  #include "gc/shared/markBitMap.hpp"
  #include "gc/shared/softRefPolicy.hpp"
  #include "gc/shared/collectedHeap.hpp"
+ #include "gc/shenandoah/shenandoahAgeCensus.hpp"
  #include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp"
  #include "gc/shenandoah/shenandoahAsserts.hpp"
  #include "gc/shenandoah/shenandoahAllocRequest.hpp"
+ #include "gc/shenandoah/shenandoahAsserts.hpp"
+ #include "gc/shenandoah/shenandoahController.hpp"
  #include "gc/shenandoah/shenandoahLock.hpp"
  #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
+ #include "gc/shenandoah/shenandoahEvacTracker.hpp"
+ #include "gc/shenandoah/shenandoahGenerationType.hpp"
+ #include "gc/shenandoah/shenandoahMmuTracker.hpp"
  #include "gc/shenandoah/shenandoahPadding.hpp"
+ #include "gc/shenandoah/shenandoahScanRemembered.hpp"
  #include "gc/shenandoah/shenandoahSharedVariables.hpp"
  #include "gc/shenandoah/shenandoahUnload.hpp"
  #include "memory/metaspace.hpp"
  #include "services/memoryManager.hpp"
  #include "utilities/globalDefinitions.hpp"
  #include "utilities/stack.hpp"
  
  class ConcurrentGCTimer;
  class ObjectIterateScanRootClosure;
+ class PLAB;
  class ShenandoahCollectorPolicy;
- class ShenandoahControlThread;
+ class ShenandoahRegulatorThread;
  class ShenandoahGCSession;
  class ShenandoahGCStateResetter;
+ class ShenandoahGeneration;
+ class ShenandoahYoungGeneration;
+ class ShenandoahOldGeneration;
  class ShenandoahHeuristics;
+ class ShenandoahOldHeuristics;
+ class ShenandoahYoungHeuristics;
  class ShenandoahMarkingContext;
- class ShenandoahMode;
  class ShenandoahPhaseTimings;
  class ShenandoahHeap;
  class ShenandoahHeapRegion;
  class ShenandoahHeapRegionClosure;
  class ShenandoahCollectionSet;
  class ShenandoahFreeSet;
  class ShenandoahConcurrentMark;
  class ShenandoahFullGC;
  class ShenandoahMonitoringSupport;
+ class ShenandoahMode;
  class ShenandoahPacer;
  class ShenandoahReferenceProcessor;
  class ShenandoahVerifier;
  class ShenandoahWorkerThreads;
  class VMStructs;

@@ -115,34 +130,47 @@
  
  // Shenandoah GC is low-pause concurrent GC that uses Brooks forwarding pointers
  // to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
  // See ShenandoahControlThread for GC cycle structure.
  //
- class ShenandoahHeap : public CollectedHeap, public ShenandoahSpaceInfo {
+ class ShenandoahHeap : public CollectedHeap {
    friend class ShenandoahAsserts;
    friend class VMStructs;
    friend class ShenandoahGCSession;
    friend class ShenandoahGCStateResetter;
    friend class ShenandoahParallelObjectIterator;
    friend class ShenandoahSafepoint;
  
    // Supported GC
    friend class ShenandoahConcurrentGC;
+   friend class ShenandoahOldGC;
    friend class ShenandoahDegenGC;
    friend class ShenandoahFullGC;
    friend class ShenandoahUnload;
  
  // ---------- Locks that guard important data structures in Heap
  //
  private:
    ShenandoahHeapLock _lock;
+   ShenandoahGeneration* _gc_generation;
  
  public:
    ShenandoahHeapLock* lock() {
      return &_lock;
    }
  
+   ShenandoahGeneration* active_generation() const {
+     // last or latest generation might be a better name here.
+     return _gc_generation;
+   }
+ 
+   void set_gc_generation(ShenandoahGeneration* generation) {
+     _gc_generation = generation;
+   }
+ 
+   ShenandoahHeuristics* heuristics();
+ 
  // ---------- Initialization, termination, identification, printing routines
  //
  public:
    static ShenandoahHeap* heap();
  

@@ -150,13 +178,12 @@
    ShenandoahHeap::Name kind() const override { return CollectedHeap::Shenandoah; }
  
    ShenandoahHeap(ShenandoahCollectorPolicy* policy);
    jint initialize() override;
    void post_initialize() override;
-   void initialize_mode();
-   void initialize_heuristics();
- 
+   void initialize_heuristics_generations();
+   virtual void print_init_logger() const;
    void initialize_serviceability() override;
  
    void print_on(outputStream* st)              const override;
    void print_extended_on(outputStream *st)     const override;
    void print_tracing_info()                    const override;

@@ -165,47 +192,49 @@
    void stop() override;
  
    void prepare_for_verify() override;
    void verify(VerifyOption vo) override;
  
+   bool verify_generation_usage(bool verify_old, size_t old_regions, size_t old_bytes, size_t old_waste,
+                                bool verify_young, size_t young_regions, size_t young_bytes, size_t young_waste);
+ 
  // WhiteBox testing support.
    bool supports_concurrent_gc_breakpoints() const override {
      return true;
    }
  
  // ---------- Heap counters and metrics
  //
  private:
-            size_t _initial_size;
-            size_t _minimum_size;
+   size_t _initial_size;
+   size_t _minimum_size;
+ 
    volatile size_t _soft_max_size;
    shenandoah_padding(0);
-   volatile size_t _used;
    volatile size_t _committed;
-   volatile size_t _bytes_allocated_since_gc_start;
    shenandoah_padding(1);
  
+   void increase_used(const ShenandoahAllocRequest& req);
+ 
  public:
-   void increase_used(size_t bytes);
-   void decrease_used(size_t bytes);
-   void set_used(size_t bytes);
+   void increase_used(ShenandoahGeneration* generation, size_t bytes);
+   void decrease_used(ShenandoahGeneration* generation, size_t bytes);
+   void increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes);
+   void decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes);
  
    void increase_committed(size_t bytes);
    void decrease_committed(size_t bytes);
-   void increase_allocated(size_t bytes);
  
-   size_t bytes_allocated_since_gc_start() const override;
    void reset_bytes_allocated_since_gc_start();
  
    size_t min_capacity()      const;
    size_t max_capacity()      const override;
-   size_t soft_max_capacity() const override;
+   size_t soft_max_capacity() const;
    size_t initial_capacity()  const;
    size_t capacity()          const override;
    size_t used()              const override;
    size_t committed()         const;
-   size_t available()         const override;
  
    void set_soft_max_capacity(size_t v);
  
  // ---------- Periodic Tasks
  //

@@ -221,10 +250,12 @@
  private:
    uint _max_workers;
    ShenandoahWorkerThreads* _workers;
    ShenandoahWorkerThreads* _safepoint_workers;
  
+   virtual void initialize_controller();
+ 
  public:
    uint max_workers();
    void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;
  
    WorkerThreads* workers() const;

@@ -237,10 +268,11 @@
  private:
    MemRegion _heap_region;
    bool      _heap_region_special;
    size_t    _num_regions;
    ShenandoahHeapRegion** _regions;
+   uint8_t* _affiliations;       // Holds array of enum ShenandoahAffiliation, including FREE status in non-generational mode
    ShenandoahRegionIterator _update_refs_iterator;
  
  public:
  
    inline HeapWord* base() const { return _heap_region.start(); }

@@ -254,10 +286,12 @@
    inline ShenandoahHeapRegion* get_region(size_t region_idx) const;
  
    void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
    void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  
+   inline ShenandoahMmuTracker* mmu_tracker() { return &_mmu_tracker; };
+ 
  // ---------- GC state machinery
  //
  // GC state describes the important parts of collector state, that may be
  // used to make barrier selection decisions in the native and generated code.
  // Multiple bits can be set at once.

@@ -269,36 +303,43 @@
    enum GCStateBitPos {
      // Heap has forwarded objects: needs LRB barriers.
      HAS_FORWARDED_BITPOS   = 0,
  
      // Heap is under marking: needs SATB barriers.
+     // For generational mode, it means either young or old marking, or both.
      MARKING_BITPOS    = 1,
  
      // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
      EVACUATION_BITPOS = 2,
  
      // Heap is under updating: needs no additional barriers.
      UPDATEREFS_BITPOS = 3,
  
      // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
      WEAK_ROOTS_BITPOS  = 4,
+ 
+     // Young regions are under marking, need SATB barriers.
+     YOUNG_MARKING_BITPOS = 5,
+ 
+     // Old regions are under marking, need SATB barriers.
+     OLD_MARKING_BITPOS = 6
    };
  
    enum GCState {
      STABLE        = 0,
      HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
      MARKING       = 1 << MARKING_BITPOS,
      EVACUATION    = 1 << EVACUATION_BITPOS,
      UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
      WEAK_ROOTS    = 1 << WEAK_ROOTS_BITPOS,
+     YOUNG_MARKING = 1 << YOUNG_MARKING_BITPOS,
+     OLD_MARKING   = 1 << OLD_MARKING_BITPOS
    };
  
  private:
    bool _gc_state_changed;
    ShenandoahSharedBitmap _gc_state;
- 
-   // tracks if new regions have been allocated or retired since last check
    ShenandoahSharedFlag   _heap_changed;
    ShenandoahSharedFlag   _degenerated_gc_in_progress;
    ShenandoahSharedFlag   _full_gc_in_progress;
    ShenandoahSharedFlag   _full_gc_move_in_progress;
    ShenandoahSharedFlag   _concurrent_strong_root_in_progress;

@@ -306,10 +347,12 @@
    size_t _gc_no_progress_count;
  
  // This updates the singular, global gc state. This must happen on a safepoint.
    void set_gc_state(uint mask, bool value);
  
+   ShenandoahAgeCensus* _age_census;    // Age census used for adapting tenuring threshold in generational mode
+ 
  public:
    char gc_state() const;
  
    // This copies the global gc state into a thread local variable for java threads.
    // It is primarily intended to support quick access at barriers.

@@ -323,55 +366,74 @@
    // uncommitted since the previous calls. This call will reset the flag to false.
    bool has_changed() {
      return _heap_changed.try_unset();
    }
  
-   void set_concurrent_mark_in_progress(bool in_progress);
+   void set_concurrent_young_mark_in_progress(bool in_progress);
+   void set_concurrent_old_mark_in_progress(bool in_progress);
    void set_evacuation_in_progress(bool in_progress);
    void set_update_refs_in_progress(bool in_progress);
    void set_degenerated_gc_in_progress(bool in_progress);
    void set_full_gc_in_progress(bool in_progress);
    void set_full_gc_move_in_progress(bool in_progress);
    void set_has_forwarded_objects(bool cond);
    void set_concurrent_strong_root_in_progress(bool cond);
    void set_concurrent_weak_root_in_progress(bool cond);
  
+   void set_aging_cycle(bool cond);
+ 
    inline bool is_stable() const;
    inline bool is_idle() const;
+ 
    inline bool is_concurrent_mark_in_progress() const;
+   inline bool is_concurrent_young_mark_in_progress() const;
+   inline bool is_concurrent_old_mark_in_progress() const;
    inline bool is_update_refs_in_progress() const;
    inline bool is_evacuation_in_progress() const;
    inline bool is_degenerated_gc_in_progress() const;
    inline bool is_full_gc_in_progress() const;
    inline bool is_full_gc_move_in_progress() const;
    inline bool has_forwarded_objects() const;
  
    inline bool is_stw_gc_in_progress() const;
    inline bool is_concurrent_strong_root_in_progress() const;
    inline bool is_concurrent_weak_root_in_progress() const;
+   bool is_prepare_for_old_mark_in_progress() const;
+   inline bool is_aging_cycle() const;
+ 
+   // Return the age census object for young gen (in generational mode)
+   inline ShenandoahAgeCensus* age_census() const;
  
  private:
+   void manage_satb_barrier(bool active);
+ 
    enum CancelState {
      // Normal state. GC has not been cancelled and is open for cancellation.
      // Worker threads can suspend for safepoint.
      CANCELLABLE,
  
      // GC has been cancelled. Worker threads can not suspend for
      // safepoint but must finish their work as soon as possible.
      CANCELLED
    };
  
+   double _cancel_requested_time;
    ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
+ 
+   // Returns true if cancel request was successfully communicated.
+   // Returns false if some other thread already communicated cancel
+   // request.  A true return value does not mean GC has been
+   // cancelled, only that the process of cancelling GC has begun.
    bool try_cancel_gc();
  
  public:
- 
    inline bool cancelled_gc() const;
    inline bool check_cancelled_gc_and_yield(bool sts_active = true);
  
-   inline void clear_cancelled_gc();
+   inline void clear_cancelled_gc(bool clear_oom_handler = true);
  
+   void cancel_concurrent_mark();
    void cancel_gc(GCCause::Cause cause);
  
  public:
    // These will uncommit empty regions if heap::committed > shrink_until
    // and there exists at least one region which was made empty before shrink_before.

@@ -381,13 +443,10 @@
    // Returns true if the soft maximum heap has been changed using management APIs.
    bool check_soft_max_changed();
  
  private:
    // GC support
-   // Reset bitmap, prepare regions for new GC cycle
-   void prepare_gc();
-   void prepare_regions_and_collection_set(bool concurrent);
    // Evacuation
    void evacuate_collection_set(bool concurrent);
    // Concurrent root processing
    void prepare_concurrent_roots();
    void finish_concurrent_roots();

@@ -396,42 +455,62 @@
    // Reference updating
    void prepare_update_heap_references(bool concurrent);
    void update_heap_references(bool concurrent);
    // Final update region states
    void update_heap_region_states(bool concurrent);
-   void rebuild_free_set(bool concurrent);
  
    void rendezvous_threads();
    void recycle_trash();
  public:
+   void rebuild_free_set(bool concurrent);
    void notify_gc_progress();
    void notify_gc_no_progress();
    size_t get_gc_no_progress_count() const;
  
  //
  // Mark support
  private:
-   ShenandoahControlThread*   _control_thread;
+   ShenandoahYoungGeneration* _young_generation;
+   ShenandoahGeneration*      _global_generation;
+   ShenandoahOldGeneration*   _old_generation;
+ 
+ protected:
+   ShenandoahController*  _control_thread;
+ 
+ private:
    ShenandoahCollectorPolicy* _shenandoah_policy;
    ShenandoahMode*            _gc_mode;
-   ShenandoahHeuristics*      _heuristics;
    ShenandoahFreeSet*         _free_set;
    ShenandoahPacer*           _pacer;
    ShenandoahVerifier*        _verifier;
  
-   ShenandoahPhaseTimings*    _phase_timings;
- 
-   ShenandoahControlThread*   control_thread()          { return _control_thread;    }
+   ShenandoahPhaseTimings*       _phase_timings;
+   ShenandoahEvacuationTracker*  _evac_tracker;
+   ShenandoahMmuTracker          _mmu_tracker;
+   ShenandoahGenerationSizer     _generation_sizer;
  
  public:
+   ShenandoahController*   control_thread() { return _control_thread; }
+ 
+   ShenandoahYoungGeneration* young_generation()  const { return _young_generation;  }
+   ShenandoahGeneration*      global_generation() const { return _global_generation; }
+   ShenandoahOldGeneration*   old_generation()    const { return _old_generation;    }
+   ShenandoahGeneration*      generation_for(ShenandoahAffiliation affiliation) const;
+   const ShenandoahGenerationSizer* generation_sizer()  const { return &_generation_sizer;  }
+ 
    ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
    ShenandoahMode*            mode()              const { return _gc_mode;           }
-   ShenandoahHeuristics*      heuristics()        const { return _heuristics;        }
    ShenandoahFreeSet*         free_set()          const { return _free_set;          }
    ShenandoahPacer*           pacer()             const { return _pacer;             }
  
-   ShenandoahPhaseTimings*    phase_timings()     const { return _phase_timings;     }
+   ShenandoahPhaseTimings*      phase_timings()   const { return _phase_timings;     }
+   ShenandoahEvacuationTracker* evac_tracker()    const { return _evac_tracker;      }
+ 
+   ShenandoahEvacOOMHandler* oom_evac_handler() { return &_oom_evac_handler; }
+ 
+   void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation);
+   void on_cycle_end(ShenandoahGeneration* generation);
  
    ShenandoahVerifier*        verifier();
  
  // ---------- VM subsystem bindings
  //

@@ -442,31 +521,24 @@
    GCMemoryManager              _cycle_memory_manager;
    ConcurrentGCTimer*           _gc_timer;
    // For exporting to SA
    int                          _log_min_obj_alignment_in_bytes;
  public:
-   ShenandoahMonitoringSupport* monitoring_support()          { return _monitoring_support;    }
+   ShenandoahMonitoringSupport* monitoring_support() const    { return _monitoring_support;    }
    GCMemoryManager* cycle_memory_manager()                    { return &_cycle_memory_manager; }
    GCMemoryManager* stw_memory_manager()                      { return &_stw_memory_manager;   }
  
    GrowableArray<GCMemoryManager*> memory_managers() override;
    GrowableArray<MemoryPool*> memory_pools() override;
    MemoryUsage memory_usage() override;
    GCTracer* tracer();
    ConcurrentGCTimer* gc_timer() const;
  
- // ---------- Reference processing
- //
- private:
-   ShenandoahReferenceProcessor* const _ref_processor;
- 
- public:
-   ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; }
- 
  // ---------- Class Unloading
  //
  private:
+   ShenandoahSharedFlag  _is_aging_cycle;
    ShenandoahSharedFlag _unload_classes;
    ShenandoahUnload     _unloader;
  
  public:
    void set_unload_classes(bool uc);

@@ -478,10 +550,13 @@
  private:
    void stw_unload_classes(bool full_gc);
    void stw_process_weak_roots(bool full_gc);
    void stw_weak_refs(bool full_gc);
  
+   inline void assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
+                                           ShenandoahAffiliation new_affiliation);
+ 
    // Heap iteration support
    void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
    bool prepare_aux_bitmap_for_iteration();
    void reclaim_aux_bitmap_for_iteration();
  

@@ -491,11 +566,21 @@
  // and can be stubbed out.
  //
  public:
    bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false);
  
-   bool is_in(const void* p) const override;
+   inline bool is_in(const void* p) const override;
+ 
+   inline bool is_in_active_generation(oop obj) const;
+   inline bool is_in_young(const void* p) const;
+   inline bool is_in_old(const void* p) const;
+   inline bool is_old(oop pobj) const;
+ 
+   inline ShenandoahAffiliation region_affiliation(const ShenandoahHeapRegion* r);
+   inline void set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation);
+ 
+   inline ShenandoahAffiliation region_affiliation(size_t index);
  
    bool requires_barriers(stackChunkOop obj) const override;
  
    MemRegion reserved_region() const { return _reserved; }
    bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

@@ -539,24 +624,26 @@
    void sync_pinned_region_status();
    void assert_pinned_region_status() NOT_DEBUG_RETURN;
  
  // ---------- Allocation support
  //
+ protected:
+   inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
+ 
  private:
    HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
-   inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
    HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
    HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);
  
  public:
    HeapWord* allocate_memory(ShenandoahAllocRequest& request);
    HeapWord* mem_allocate(size_t size, bool* what) override;
    MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                 size_t size,
                                                 Metaspace::MetadataType mdtype) override;
  
-   void notify_mutator_alloc_words(size_t words, bool waste);
+   void notify_mutator_alloc_words(size_t words, size_t waste);
  
    HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
    size_t tlab_capacity(Thread *thr) const override;
    size_t unsafe_max_tlab_alloc(Thread *thread) const override;
    size_t max_tlab_size() const override;

@@ -590,24 +677,20 @@
    ShenandoahLiveData** _liveness_cache;
  
  public:
    inline ShenandoahMarkingContext* complete_marking_context() const;
    inline ShenandoahMarkingContext* marking_context() const;
-   inline void mark_complete_marking_context();
-   inline void mark_incomplete_marking_context();
  
    template<class T>
    inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
  
    template<class T>
    inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
  
    template<class T>
    inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
  
-   void reset_mark_bitmap();
- 
    // SATB barriers hooks
    inline bool requires_marking(const void* entry) const;
  
    // Support for bitmap uncommits
    bool commit_bitmap_slice(ShenandoahHeapRegion *r);

@@ -624,29 +707,42 @@
  //
  private:
    ShenandoahCollectionSet* _collection_set;
    ShenandoahEvacOOMHandler _oom_evac_handler;
  
+   oop try_evacuate_object(oop src, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahAffiliation target_gen);
  public:
+ 
    static address in_cset_fast_test_addr();
  
    ShenandoahCollectionSet* collection_set() const { return _collection_set; }
  
    // Checks if object is in the collection set.
    inline bool in_collection_set(oop obj) const;
  
    // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
    inline bool in_collection_set_loc(void* loc) const;
  
-   // Evacuates object src. Returns the evacuated object, either evacuated
+   // Evacuates or promotes object src. Returns the evacuated object, either evacuated
    // by this thread, or by some other thread.
-   inline oop evacuate_object(oop src, Thread* thread);
+   virtual oop evacuate_object(oop src, Thread* thread);
  
    // Call before/after evacuation.
    inline void enter_evacuation(Thread* t);
    inline void leave_evacuation(Thread* t);
  
+ // ---------- Generational support
+ //
+ private:
+   RememberedScanner* _card_scan;
+ 
+ public:
+   inline RememberedScanner* card_scan() { return _card_scan; }
+   void clear_cards_for(ShenandoahHeapRegion* region);
+   void mark_card_as_dirty(void* location);
+   void cancel_old_gc();
+ 
  // ---------- Helper functions
  //
  public:
    template <class T>
    inline void conc_update_with_forwarded(T* p);

@@ -664,11 +760,22 @@
  
    static inline void atomic_clear_oop(      oop* addr,       oop compare);
    static inline void atomic_clear_oop(narrowOop* addr,       oop compare);
    static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);
  
-   void trash_humongous_region_at(ShenandoahHeapRegion *r);
+   size_t trash_humongous_region_at(ShenandoahHeapRegion *r);
+ 
+   static inline void increase_object_age(oop obj, uint additional_age);
+ 
+   // Return the object's age, or a sentinel value when the age can't
+   // necessarily be determined because of concurrent locking by the
+   // mutator
+   static inline uint get_object_age(oop obj);
+ 
+   void transfer_old_pointers_from_satb();
+ 
+   void log_heap_status(const char *msg) const;
  
  private:
    void trash_cset_regions();
  
  // ---------- Testing helpers functions
< prev index next >