/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahGenerationType.hpp"
#include "gc/shenandoah/shenandoahMmuTracker.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "gc/shenandoah/shenandoahUnload.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.hpp"
#include "memory/metaspace.hpp"
#include "services/memoryManager.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.hpp"

class ConcurrentGCTimer;
class ObjectIterateScanRootClosure;
class PLAB;
class ShenandoahCollectorPolicy;
class ShenandoahControlThread;
class ShenandoahRegulatorThread;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahGeneration;
class ShenandoahYoungGeneration;
class ShenandoahOldGeneration;
class ShenandoahHeuristics;
class ShenandoahOldHeuristics;
class ShenandoahMarkingContext;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahFullGC;
class ShenandoahMonitoringSupport;
class ShenandoahMode;
class ShenandoahPacer;
class ShenandoahReferenceProcessor;
class ShenandoahVerifier;
class ShenandoahWorkerThreads;
class VMStructs;

// Used for buffering per-region liveness data.
// Needed since ShenandoahHeapRegion uses atomics to update liveness.
// The ShenandoahHeap array has max-workers elements, each of which is an array of
// uint16_t * max_regions. The choice of uint16_t is not accidental:
// there is a tradeoff between static/dynamic footprint that translates
// into cache pressure (which is already high during marking), and
// too many atomic updates. uint32_t is too large, uint8_t is too small.
typedef uint16_t ShenandoahLiveData;
#define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)
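
// Illustrative sketch (an editorial example, not part of the collector's interface): a marking
// worker is expected to accumulate live data into its per-worker cache and publish it once done,
// using the accessors declared further down in this file, roughly:
//
//   ShenandoahLiveData* cache = heap->get_liveness_cache(worker_id);
//   cache[region_index] += (ShenandoahLiveData) live_data;   // caller keeps this below SHENANDOAH_LIVEDATA_MAX
//   ...
//   heap->flush_liveness_cache(worker_id);                   // publishes cached values to the regions
//
// worker_id, region_index and live_data are placeholder names.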

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

  // No implicit copying: iterators should be passed by reference to capture the state
  NONCOPYABLE(ShenandoahRegionIterator);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or null if there are no more regions.
  // This is multi-thread-safe.
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};
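
// Illustrative use (an editorial sketch): several workers may share one iterator and pull
// regions from it until it is drained:
//
//   ShenandoahRegionIterator it(heap);
//   for (ShenandoahHeapRegion* r = it.next(); r != nullptr; r = it.next()) {
//     process(r);   // process() is a placeholder for per-region work
//   }
//
// next() is safe to call from multiple threads at once; has_next() is not (see above).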

class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual bool is_thread_safe() { return false; }
};
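
// Illustrative sketch: a region walk is written as a subclass of the closure above and handed to
// heap_region_iterate() or, when heap_region_do() is safe to run concurrently, to
// parallel_heap_region_iterate() (both declared on ShenandoahHeap below):
//
//   class ExampleRegionClosure : public ShenandoahHeapRegionClosure {   // hypothetical example
//     void heap_region_do(ShenandoahHeapRegion* r) override { /* inspect or update r */ }
//     // Override is_thread_safe() to return true only if heap_region_do() tolerates
//     // being called from multiple workers simultaneously.
//   };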

template<ShenandoahGenerationType GENERATION>
class ShenandoahGenerationRegionClosure : public ShenandoahHeapRegionClosure {
 public:
  explicit ShenandoahGenerationRegionClosure(ShenandoahHeapRegionClosure* cl) : _cl(cl) {}
  void heap_region_do(ShenandoahHeapRegion* r);
  virtual bool is_thread_safe() { return _cl->is_thread_safe(); }
 private:
  ShenandoahHeapRegionClosure* _cl;
};

typedef ShenandoahLock    ShenandoahHeapLock;
typedef ShenandoahLocker  ShenandoahHeapLocker;
typedef Stack<oop, mtGC>  ShenandoahScanObjectStack;

// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
// See ShenandoahControlThread for GC cycle structure.
//
class ShenandoahHeap : public CollectedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;
  friend class ShenandoahGCStateResetter;
  friend class ShenandoahParallelObjectIterator;
  friend class ShenandoahSafepoint;
  // Supported GC
  friend class ShenandoahConcurrentGC;
  friend class ShenandoahOldGC;
  friend class ShenandoahDegenGC;
  friend class ShenandoahFullGC;
  friend class ShenandoahUnload;

// ---------- Locks that guard important data structures in Heap
//
private:
  ShenandoahHeapLock _lock;
  ShenandoahGeneration* _gc_generation;

  // true iff we are concurrently coalescing and filling old-gen HeapRegions
  bool _prepare_for_old_mark;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }

  ShenandoahGeneration* active_generation() const {
    // last or latest generation might be a better name here.
    return _gc_generation;
  }

  void set_gc_generation(ShenandoahGeneration* generation) {
    _gc_generation = generation;
  }

  ShenandoahOldHeuristics* old_heuristics();

  bool doing_mixed_evacuations();
  bool is_old_bitmap_stable() const;
  bool is_gc_generation_young() const;

// ---------- Initialization, termination, identification, printing routines
//
public:
  static ShenandoahHeap* heap();

  const char* name()          const override { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const override { return CollectedHeap::Shenandoah; }

  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize() override;
  void post_initialize() override;
  void initialize_heuristics_generations();

  void initialize_serviceability() override;

  void print_on(outputStream* st)              const override;
  void print_extended_on(outputStream *st)     const override;
  void print_tracing_info()                    const override;
  void print_heap_regions_on(outputStream* st) const;

  void stop() override;

  void prepare_for_verify() override;
  void verify(VerifyOption vo) override;

  bool verify_generation_usage(bool verify_old, size_t old_regions, size_t old_bytes, size_t old_waste,
                               bool verify_young, size_t young_regions, size_t young_bytes, size_t young_waste);

// WhiteBox testing support.
  bool supports_concurrent_gc_breakpoints() const override {
    return true;
  }

// ---------- Heap counters and metrics
//
private:
           size_t _initial_size;
           size_t _minimum_size;
           size_t _promotion_potential;
           size_t _promotion_in_place_potential;
           size_t _pad_for_promote_in_place;    // bytes of filler
           size_t _promotable_humongous_regions;
           size_t _promotable_humongous_usage;
           size_t _regular_regions_promoted_in_place;
           size_t _regular_usage_promoted_in_place;

  volatile size_t _soft_max_size;
  shenandoah_padding(0);
  volatile size_t _committed;
  shenandoah_padding(1);

  void increase_used(const ShenandoahAllocRequest& req);

public:
  void increase_used(ShenandoahGeneration* generation, size_t bytes);
  void decrease_used(ShenandoahGeneration* generation, size_t bytes);
  void increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes);
  void decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);

  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity()      const;
  size_t max_capacity()      const override;
  size_t soft_max_capacity() const;
  size_t initial_capacity()  const;
  size_t capacity()          const override;
  size_t used()              const override;
  size_t committed()         const;

  void set_soft_max_capacity(size_t v);

// ---------- Workers handling
//
private:
  uint _max_workers;
  ShenandoahWorkerThreads* _workers;
  ShenandoahWorkerThreads* _safepoint_workers;

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  WorkerThreads* workers() const;
  WorkerThreads* safepoint_workers() override;

  void gc_threads_do(ThreadClosure* tcl) const override;

// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool      _heap_region_special;
  size_t    _num_regions;
  ShenandoahHeapRegion** _regions;
  uint8_t* _affiliations;       // Holds array of enum ShenandoahAffiliation, including FREE status in non-generational mode
  ShenandoahRegionIterator _update_refs_iterator;

public:

  inline HeapWord* base() const { return _heap_region.start(); }

  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

  inline ShenandoahMmuTracker* const mmu_tracker() { return &_mmu_tracker; };

// ---------- GC state machinery
//
// GC state describes the important parts of collector state that may be
// used to make barrier selection decisions in native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when GC state is zero, the heap is stable, and no barriers
// are required.
//
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: needs LRB barriers.
    HAS_FORWARDED_BITPOS   = 0,

    // Young regions are under marking: needs SATB barriers.
    YOUNG_MARKING_BITPOS    = 1,

    // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,

    // Heap is under updating: needs no additional barriers.
    UPDATEREFS_BITPOS = 3,

    // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
    WEAK_ROOTS_BITPOS  = 4,

    // Old regions are under marking, still need SATB barriers.
    OLD_MARKING_BITPOS = 5
  };

  enum GCState {
    STABLE        = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    YOUNG_MARKING = 1 << YOUNG_MARKING_BITPOS,
    EVACUATION    = 1 << EVACUATION_BITPOS,
    UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
    WEAK_ROOTS    = 1 << WEAK_ROOTS_BITPOS,
    OLD_MARKING   = 1 << OLD_MARKING_BITPOS
  };
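
  // Illustrative reading of the state mask (an editorial sketch based on the bit definitions
  // above, not a statement of the exact phase transitions): during concurrent evacuation the
  // published state is expected to carry both the EVACUATION and HAS_FORWARDED bits, e.g.
  //
  //   char state = ShenandoahHeap::heap()->gc_state();
  //   bool evac  = (state & EVACUATION) != 0;    // EVACUATION is set together with HAS_FORWARDED
  //   bool quiet = (state == STABLE);            // zero state: heap is stable, no barriers required
  //
  // Barrier code consults these bits (via gc_state_addr() for generated code) when selecting
  // which barriers to apply.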

private:
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag   _degenerated_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_move_in_progress;
  ShenandoahSharedFlag   _progress_last_gc;
  ShenandoahSharedFlag   _concurrent_strong_root_in_progress;

  // TODO: Revisit the following comment.  It may not accurately represent the true behavior when evacuations fail due to
  // difficulty finding memory to hold evacuated objects.
  //
  // Note that the typical total expenditure on evacuation is less than the associated evacuation reserve because we generally
  // reserve ShenandoahEvacWaste (> 1.0) times the anticipated evacuation need.  In the case that there is an excessive amount
  // of waste, one thread may fail to grab a new GCLAB; this does not necessarily doom the associated evacuation
  // effort.  If this happens, the requesting thread blocks until some other thread manages to evacuate the offending object.
  // Only after "all" threads fail to evacuate an object do we consider the evacuation effort to have failed.

  // How many full-gc cycles have been completed?
  volatile size_t _completed_fullgc_cycles;

  size_t _promoted_reserve;            // Bytes reserved within old-gen to hold the results of promotion
  volatile size_t _promoted_expended;  // Bytes of old-gen memory expended on promotions

  // Allocation of old GCLABs (aka PLABs) ensures that _old_evac_expended + request-size < _old_evac_reserve.  If the allocation
  //  is authorized, increment _old_evac_expended by the request size.  This allocation ignores old_gen->available().

  size_t _old_evac_reserve;            // Bytes reserved within old-gen to hold evacuated objects from old-gen collection set
  volatile size_t _old_evac_expended;  // Bytes of old-gen memory expended on old-gen evacuations
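
  // Illustrative restatement of the rule above (a non-atomic editorial sketch; the actual
  // allocation path is expected to go through expend_old_evac(), declared further down):
  //
  //   if (get_old_evac_expended() + request_bytes < get_old_evac_reserve()) {
  //     expend_old_evac(request_bytes);   // charge the PLAB against the old evacuation budget
  //     // ... proceed to allocate the PLAB from old-gen ...
  //   }
  //
  // request_bytes is a placeholder for the requested PLAB size.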

  size_t _young_evac_reserve;          // Bytes reserved within young-gen to hold evacuated objects from young-gen collection set

  size_t _captured_old_usage;          // What was old usage (bytes) when last captured?

  size_t _previous_promotion;          // Bytes promoted during previous evacuation

  bool _upgraded_to_full;

  // At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
  // hold the results of evacuating to young-gen and to old-gen.  These quantities, stored in _promoted_reserve,
  // _old_evac_reserve, and _young_evac_reserve, are consulted prior to rebuilding the free set (ShenandoahFreeSet)
  // in preparation for evacuation.  When the free set is rebuilt, we make sure to reserve sufficient memory in the
  // collector and old_collector sets to hold these quantities if _has_evacuation_reserve_quantities is true.  The other
  // time we rebuild the free set is at the end of GC, as we prepare to idle GC until the next trigger.  In this case,
  // _has_evacuation_reserve_quantities is false because we don't yet know how much memory will need to be evacuated
  // in the next GC cycle.  When _has_evacuation_reserve_quantities is false, the free set rebuild operation reserves
  // for the collector and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve,
  // ShenandoahOldEvacReserve, and ShenandoahOldCompactionReserve.  In a future planned enhancement, the reserve
  // for the old_collector set when _has_evacuation_reserve_quantities is false will be based in part on anticipated
  // promotion, as determined by analysis of live data found during the previous GC pass at the age one less than the
  // current tenure age.
  bool _has_evacuation_reserve_quantities;

  void set_gc_state_all_threads(char state);
  void set_gc_state_mask(uint mask, bool value);

public:
  char gc_state() const;
  static address gc_state_addr();

  void set_evacuation_reserve_quantities(bool is_valid);
  void set_concurrent_young_mark_in_progress(bool in_progress);
  void set_concurrent_old_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);
  void set_concurrent_strong_root_in_progress(bool cond);
  void set_concurrent_weak_root_in_progress(bool cond);
  void set_prepare_for_old_mark_in_progress(bool cond);
  void set_aging_cycle(bool cond);


  inline bool is_stable() const;
  inline bool is_idle() const;
  inline bool has_evacuation_reserve_quantities() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_concurrent_young_mark_in_progress() const;
  inline bool is_concurrent_old_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool has_forwarded_objects() const;
  inline bool is_gc_in_progress_mask(uint mask) const;
  inline bool is_stw_gc_in_progress() const;
  inline bool is_concurrent_strong_root_in_progress() const;
  inline bool is_concurrent_weak_root_in_progress() const;
  inline bool is_prepare_for_old_mark_in_progress() const;
  inline bool is_aging_cycle() const;
  inline bool upgraded_to_full() { return _upgraded_to_full; }
  inline void start_conc_gc() { _upgraded_to_full = false; }
  inline void record_upgrade_to_full() { _upgraded_to_full = true; }

  inline size_t capture_old_usage(size_t usage);
  inline void set_previous_promotion(size_t promoted_bytes);
  inline size_t get_previous_promotion() const;

  inline void clear_promotion_potential() { _promotion_potential = 0; };
  inline void set_promotion_potential(size_t val) { _promotion_potential = val; };
  inline size_t get_promotion_potential() { return _promotion_potential; };

  inline void clear_promotion_in_place_potential() { _promotion_in_place_potential = 0; };
  inline void set_promotion_in_place_potential(size_t val) { _promotion_in_place_potential = val; };
  inline size_t get_promotion_in_place_potential() { return _promotion_in_place_potential; };

  inline void set_pad_for_promote_in_place(size_t pad) { _pad_for_promote_in_place = pad; }
  inline size_t get_pad_for_promote_in_place() { return _pad_for_promote_in_place; }

  inline void reserve_promotable_humongous_regions(size_t region_count) { _promotable_humongous_regions = region_count; }
  inline void reserve_promotable_humongous_usage(size_t bytes) { _promotable_humongous_usage = bytes; }
  inline void reserve_promotable_regular_regions(size_t region_count) { _regular_regions_promoted_in_place = region_count; }
  inline void reserve_promotable_regular_usage(size_t used_bytes) { _regular_usage_promoted_in_place = used_bytes; }

  inline size_t get_promotable_humongous_regions() { return _promotable_humongous_regions; }
  inline size_t get_promotable_humongous_usage() { return _promotable_humongous_usage; }
  inline size_t get_regular_regions_promoted_in_place() { return _regular_regions_promoted_in_place; }
  inline size_t get_regular_usage_promoted_in_place() { return _regular_usage_promoted_in_place; }

  // Returns previous value
  inline size_t set_promoted_reserve(size_t new_val);
  inline size_t get_promoted_reserve() const;
  inline void augment_promo_reserve(size_t increment);

  inline void reset_promoted_expended();
  inline size_t expend_promoted(size_t increment);
  inline size_t unexpend_promoted(size_t decrement);
  inline size_t get_promoted_expended();

  // Returns previous value
  inline size_t set_old_evac_reserve(size_t new_val);
  inline size_t get_old_evac_reserve() const;
  inline void augment_old_evac_reserve(size_t increment);

  inline void reset_old_evac_expended();
  inline size_t expend_old_evac(size_t increment);
  inline size_t get_old_evac_expended();

  // Returns previous value
  inline size_t set_young_evac_reserve(size_t new_val);
  inline size_t get_young_evac_reserve() const;

private:
  void manage_satb_barrier(bool active);

  enum CancelState {
    // Normal state. GC has not been cancelled and is open for cancellation.
    // Worker threads can suspend for safepoint.
    CANCELLABLE,

    // GC has been cancelled. Worker threads can not suspend for
    // safepoint but must finish their work as soon as possible.
    CANCELLED
  };

  double _cancel_requested_time;
  ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;

  // Returns true if cancel request was successfully communicated.
  // Returns false if some other thread already communicated cancel
  // request.  A true return value does not mean GC has been
  // cancelled, only that the process of cancelling GC has begun.
  bool try_cancel_gc();

public:
  inline bool cancelled_gc() const;
  inline bool check_cancelled_gc_and_yield(bool sts_active = true);

  inline void clear_cancelled_gc(bool clear_oom_handler = true);

  void cancel_concurrent_mark();
  void cancel_gc(GCCause::Cause cause);
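
  // Illustrative sketch of how the cancellation protocol above is typically consumed by a
  // concurrent GC worker loop (placeholder structure, not the actual worker code):
  //
  //   while (has_work()) {                              // has_work() is hypothetical
  //     if (heap->check_cancelled_gc_and_yield()) {
  //       return;                                       // cancelled: wind down promptly
  //     }
  //     do_unit_of_work();                              // hypothetical unit of work
  //   }
  //
  // check_cancelled_gc_and_yield() is expected to report cancellation and, while the GC is
  // still CANCELLABLE, also yield to a pending safepoint.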

public:
  // Elastic heap support
  void entry_uncommit(double shrink_before, size_t shrink_until);
  void op_uncommit(double shrink_before, size_t shrink_until);

private:
  // GC support
  // Evacuation
  void evacuate_collection_set(bool concurrent);
  // Concurrent root processing
  void prepare_concurrent_roots();
  void finish_concurrent_roots();
  // Concurrent class unloading support
  void do_class_unloading();
  // Reference updating
  void prepare_update_heap_references(bool concurrent);
  void update_heap_references(bool concurrent);
  // Final update region states
  void update_heap_region_states(bool concurrent);

  void rendezvous_threads();
  void recycle_trash();
public:
  void rebuild_free_set(bool concurrent);
  void notify_gc_progress()    { _progress_last_gc.set();   }
  void notify_gc_no_progress() { _progress_last_gc.unset(); }

// ---------- Mark support
//
private:
  ShenandoahYoungGeneration* _young_generation;
  ShenandoahGeneration*      _global_generation;
  ShenandoahOldGeneration*   _old_generation;

  ShenandoahControlThread*   _control_thread;
  ShenandoahRegulatorThread* _regulator_thread;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahMode*            _gc_mode;
  ShenandoahFreeSet*         _free_set;
  ShenandoahPacer*           _pacer;
  ShenandoahVerifier*        _verifier;

  ShenandoahPhaseTimings*       _phase_timings;
  ShenandoahEvacuationTracker*  _evac_tracker;
  ShenandoahMmuTracker          _mmu_tracker;
  ShenandoahGenerationSizer     _generation_sizer;

  ShenandoahRegulatorThread* regulator_thread()        { return _regulator_thread;  }

public:
  ShenandoahControlThread*   control_thread()          { return _control_thread;    }
  ShenandoahYoungGeneration* young_generation()  const { return _young_generation;  }
  ShenandoahGeneration*      global_generation() const { return _global_generation; }
  ShenandoahOldGeneration*   old_generation()    const { return _old_generation;    }
  ShenandoahGeneration*      generation_for(ShenandoahAffiliation affiliation) const;
  const ShenandoahGenerationSizer* generation_sizer()  const { return &_generation_sizer;  }

  size_t max_size_for(ShenandoahGeneration* generation) const;
  size_t min_size_for(ShenandoahGeneration* generation) const;

  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahMode*            mode()              const { return _gc_mode;           }
  ShenandoahFreeSet*         free_set()          const { return _free_set;          }
  ShenandoahPacer*           pacer()             const { return _pacer;             }

  ShenandoahPhaseTimings*      phase_timings()   const { return _phase_timings;     }
  ShenandoahEvacuationTracker* evac_tracker()    const { return  _evac_tracker;     }

  void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation);
  void on_cycle_end(ShenandoahGeneration* generation);

  ShenandoahVerifier*        verifier();

// ---------- VM subsystem bindings
//
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  MemoryPool*                  _memory_pool;
  MemoryPool*                  _young_gen_memory_pool;
  MemoryPool*                  _old_gen_memory_pool;

  GCMemoryManager              _stw_memory_manager;
  GCMemoryManager              _cycle_memory_manager;
  ConcurrentGCTimer*           _gc_timer;
  SoftRefPolicy                _soft_ref_policy;

  // For exporting to SA
  int                          _log_min_obj_alignment_in_bytes;
public:
  ShenandoahMonitoringSupport* monitoring_support() const    { return _monitoring_support;    }
  GCMemoryManager* cycle_memory_manager()                    { return &_cycle_memory_manager; }
  GCMemoryManager* stw_memory_manager()                      { return &_stw_memory_manager;   }
  SoftRefPolicy* soft_ref_policy()                  override { return &_soft_ref_policy;      }

  GrowableArray<GCMemoryManager*> memory_managers() override;
  GrowableArray<MemoryPool*> memory_pools() override;
  MemoryUsage memory_usage() override;
  GCTracer* tracer();
  ConcurrentGCTimer* gc_timer() const;

// ---------- Class Unloading
//
private:
  ShenandoahSharedFlag  _is_aging_cycle;
  ShenandoahSharedFlag _unload_classes;
  ShenandoahUnload     _unloader;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Perform STW class unloading and weak root cleaning
  void parallel_cleaning(bool full_gc);

private:
  void stw_unload_classes(bool full_gc);
  void stw_process_weak_roots(bool full_gc);
  void stw_weak_refs(bool full_gc);

  inline void assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
                                          ShenandoahAffiliation new_affiliation);

  // Heap iteration support
  void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
  bool prepare_aux_bitmap_for_iteration();
  void reclaim_aux_bitmap_for_iteration();

// ---------- Generic interface hooks
// Minor things that super-interface expects us to implement to play nice with
// the rest of runtime. Some of the things here are not required to be implemented,
// and can be stubbed out.
//
public:
  bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false);

  inline bool is_in(const void* p) const override;

  inline bool is_in_active_generation(oop obj) const;
  inline bool is_in_young(const void* p) const;
  inline bool is_in_old(const void* p) const;
  inline bool is_old(oop pobj) const;

  inline ShenandoahAffiliation region_affiliation(const ShenandoahHeapRegion* r);
  inline void set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation);

  inline ShenandoahAffiliation region_affiliation(size_t index);
  inline void set_affiliation(size_t index, ShenandoahAffiliation new_affiliation);

  bool requires_barriers(stackChunkOop obj) const override;

  MemRegion reserved_region() const { return _reserved; }
  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

  void collect(GCCause::Cause cause) override;
  void do_full_collection(bool clear_all_soft_refs) override;

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;
  bool print_location(outputStream* st, void* addr) const override;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl) override;
  // Parallel heap iteration support
  ParallelObjectIteratorImpl* parallel_object_iterator(uint workers) override;

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  void keep_alive(oop obj) override;

// ---------- Safepoint interface hooks
//
public:
  void safepoint_synchronize_begin() override;
  void safepoint_synchronize_end() override;

// ---------- Code roots handling hooks
//
public:
  void register_nmethod(nmethod* nm) override;
  void unregister_nmethod(nmethod* nm) override;
  void verify_nmethod(nmethod* nm) override {}

// ---------- Pinning hooks
//
public:
  // Shenandoah supports per-object (per-region) pinning
  void pin_object(JavaThread* thread, oop obj) override;
  void unpin_object(JavaThread* thread, oop obj) override;

  void sync_pinned_region_status();
  void assert_pinned_region_status() NOT_DEBUG_RETURN;

// ---------- Concurrent Stack Processing support
//
public:
  bool uses_stack_watermark_barrier() const override { return true; }

// ---------- Allocation support
//
private:
  // How many bytes to transfer between old and young after we have finished recycling collection set regions?
  size_t _old_regions_surplus;
  size_t _old_regions_deficit;

  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region, bool is_promotion);

  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);

  inline HeapWord* allocate_from_plab(Thread* thread, size_t size, bool is_promotion);
  HeapWord* allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion);
  HeapWord* allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size);

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request, bool is_promotion);
  HeapWord* mem_allocate(size_t size, bool* what) override;
  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                               size_t size,
                                               Metaspace::MetadataType mdtype) override;

  void notify_mutator_alloc_words(size_t words, size_t waste);

  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
  size_t tlab_capacity(Thread *thr) const override;
  size_t unsafe_max_tlab_alloc(Thread *thread) const override;
  size_t max_tlab_size() const override;
  size_t tlab_used(Thread* ignored) const override;

  void ensure_parsability(bool retire_labs) override;

  void labs_make_parsable();
  void tlabs_retire(bool resize);
  void gclabs_retire(bool resize);

  void set_young_lab_region_flags();

  inline void set_old_region_surplus(size_t surplus) { _old_regions_surplus = surplus; };
  inline void set_old_region_deficit(size_t deficit) { _old_regions_deficit = deficit; };

  inline size_t get_old_region_surplus() { return _old_regions_surplus; };
  inline size_t get_old_region_deficit() { return _old_regions_deficit; };

// ---------- Marking support
//
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion  _bitmap_region;
  MemRegion  _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  size_t _pretouch_heap_page_size;
  size_t _pretouch_bitmap_page_size;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  ShenandoahLiveData** _liveness_cache;

public:
  inline ShenandoahMarkingContext* complete_marking_context() const;
  inline ShenandoahMarkingContext* marking_context() const;

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  // SATB barriers hooks
  inline bool requires_marking(const void* entry) const;

  // Support for bitmap uncommits
  bool commit_bitmap_slice(ShenandoahHeapRegion *r);
  bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // Liveness caching support
  ShenandoahLiveData* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);

  size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }

// ---------- Evacuation support
//
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;
  ShenandoahSharedFlag _old_gen_oom_evac;

  inline oop try_evacuate_object(oop src, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahAffiliation target_gen);
  void handle_old_evacuation(HeapWord* obj, size_t words, bool promotion);
  void handle_old_evacuation_failure();

public:
  void handle_promotion_failure();
  void report_promotion_failure(Thread* thread, size_t size);

  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  // Checks if object is in the collection set.
  inline bool in_collection_set(oop obj) const;

  // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
  inline bool in_collection_set_loc(void* loc) const;

  // Evacuates or promotes object src. Returns the evacuated object, either evacuated
  // by this thread, or by some other thread.
  inline oop evacuate_object(oop src, Thread* thread);

  // Call before/after evacuation.
  inline void enter_evacuation(Thread* t);
  inline void leave_evacuation(Thread* t);
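
  // Illustrative sketch of how a barrier slow path would be expected to use the evacuation
  // entry points above (placeholder control flow, not the actual barrier implementation):
  //
  //   if (heap->in_collection_set(obj)) {
  //     heap->enter_evacuation(thread);
  //     oop fwd = heap->evacuate_object(obj, thread);   // may be a copy made by another thread
  //     heap->leave_evacuation(thread);
  //     obj = fwd;
  //   }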

  inline bool clear_old_evacuation_failure();

// ---------- Generational support
//
private:
  RememberedScanner* _card_scan;

public:
  inline RememberedScanner* card_scan() { return _card_scan; }
  void clear_cards_for(ShenandoahHeapRegion* region);
  void dirty_cards(HeapWord* start, HeapWord* end);
  void clear_cards(HeapWord* start, HeapWord* end);
  void mark_card_as_dirty(void* location);
  void retire_plab(PLAB* plab);
  void retire_plab(PLAB* plab, Thread* thread);
  void cancel_old_gc();
  bool is_old_gc_active();
  void coalesce_and_fill_old_regions();
  void adjust_generation_sizes_for_next_cycle(size_t old_xfer_limit, size_t young_cset_regions, size_t old_cset_regions);

// ---------- Helper functions
//
public:
  template <class T>
  inline void conc_update_with_forwarded(T* p);

  template <class T>
  inline void update_with_forwarded(T* p);

  static inline void atomic_update_oop(oop update,       oop* addr,       oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr,       oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr, narrowOop compare);

  static inline bool atomic_update_oop_check(oop update,       oop* addr,       oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr,       oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare);

  static inline void atomic_clear_oop(      oop* addr,       oop compare);
  static inline void atomic_clear_oop(narrowOop* addr,       oop compare);
  static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);

  size_t trash_humongous_region_at(ShenandoahHeapRegion *r);

  static inline void increase_object_age(oop obj, uint additional_age);
  static inline uint get_object_age(oop obj);

  void transfer_old_pointers_from_satb();

  void log_heap_status(const char *msg) const;

private:
  void trash_cset_regions();

// ---------- Testing helper functions
//
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP