/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc_implementation/shared/markBitMap.hpp"
#include "gc_implementation/shenandoah/shenandoahAsserts.hpp"
#include "gc_implementation/shenandoah/shenandoahAllocRequest.hpp"
#include "gc_implementation/shenandoah/shenandoahLock.hpp"
#include "gc_implementation/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc_implementation/shenandoah/shenandoahPadding.hpp"
#include "gc_implementation/shenandoah/shenandoahSharedVariables.hpp"

class ConcurrentGCTimer;

class ShenandoahCollectionSet;
class ShenandoahCollectorPolicy;
class ShenandoahConcurrentMark;
class ShenandoahControlThread;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahFreeSet;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahMarkCompact;
class ShenandoahMonitoringSupport;
class ShenandoahHeuristics;
class ShenandoahMarkingContext;
class ShenandoahMode;
class ShenandoahPhaseTimings;
class ShenandoahPacer;
class ShenandoahVerifier;
class ShenandoahWorkGang;
class VMStructs;
// Used for buffering per-region liveness data.
// Needed since ShenandoahHeapRegion uses atomics to update liveness.
// The ShenandoahHeap array has max-workers elements, each of which is an array of
// uint16_t with one slot per region. The choice of uint16_t is deliberate:
// there is a tradeoff between static/dynamic footprint, which translates
// into cache pressure (already high during marking), and the number of
// atomic updates: uint32_t is too large, uint8_t is too small.
typedef uint16_t ShenandoahLiveData;
#define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)
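
// Illustrative sketch only (not part of the heap API): one plausible way a
// marking worker could use this buffer, assuming the region-side atomic update
// is named increase_live_data_gc_words() (see shenandoahHeapRegion.hpp):
//
//   ShenandoahLiveData* cache = heap->get_liveness_cache(worker_id);
//   size_t new_live = (size_t) cache[region_idx] + obj_words;
//   if (new_live <= SHENANDOAH_LIVEDATA_MAX) {
//     cache[region_idx] = (ShenandoahLiveData) new_live;       // cheap, thread-local
//   } else {
//     // Cache slot saturated: publish with a single atomic update and restart
//     heap->get_region(region_idx)->increase_live_data_gc_words(new_live);
//     cache[region_idx] = 0;
//   }
//   ...
//   heap->flush_liveness_cache(worker_id);  // publish leftovers when marking ends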

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  shenandoah_padding(0);
  volatile jint _index;
  shenandoah_padding(1);

  // No implicit copying: iterators should be passed by reference to capture the state
  ShenandoahRegionIterator(const ShenandoahRegionIterator& that);
  ShenandoahRegionIterator& operator=(const ShenandoahRegionIterator& o);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or NULL if there are no more regions.
  // This is multi-thread-safe.
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};
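
// Illustrative sketch only: the intended multi-threaded draining pattern, where
// all workers share one iterator by reference (hence the private copy operations
// above). process() is a hypothetical stand-in for per-region work.
//
//   void drain(ShenandoahRegionIterator& regions) {
//     for (ShenandoahHeapRegion* r = regions.next(); r != NULL; r = regions.next()) {
//       process(r);   // next() atomically claims regions, so no region is visited twice
//     }
//   }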

class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual bool is_thread_safe() { return false; }
};
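
// Illustrative sketch only: closures like this are passed to
// ShenandoahHeap::heap_region_iterate(), or to parallel_heap_region_iterate()
// when is_thread_safe() returns true. CountEmptyClosure and the is_empty()
// predicate are illustrative, not part of this header.
//
//   class CountEmptyClosure : public ShenandoahHeapRegionClosure {
//     size_t _count;
//   public:
//     CountEmptyClosure() : _count(0) {}
//     void heap_region_do(ShenandoahHeapRegion* r) { if (r->is_empty()) _count++; }
//     size_t count() const                         { return _count; }
//   };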

typedef ShenandoahLock    ShenandoahHeapLock;
typedef ShenandoahLocker  ShenandoahHeapLocker;

// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
// See ShenandoahControlThread for GC cycle structure.
//
class ShenandoahHeap : public SharedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;
  friend class ShenandoahGCStateResetter;
  friend class ShenandoahSafepoint;

// ---------- Locks that guard important data structures in Heap
//
private:
  ShenandoahHeapLock _lock;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }

// ---------- Initialization, termination, identification, printing routines
//
private:
  static ShenandoahHeap* _heap;

public:
  static ShenandoahHeap* heap();
  static size_t conservative_max_heap_alignment();

  const char* name()          const { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const { return CollectedHeap::ShenandoahHeap; }

  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize();
  void post_initialize();
  void initialize_heuristics();

  void print_on(outputStream* st)               const;
  void print_extended_on(outputStream *st)      const;
  void print_tracing_info()                     const;
  void print_gc_threads_on(outputStream* st)    const;
  void print_heap_regions_on(outputStream* st)  const;

  void stop();

  void prepare_for_verify();
  void verify(bool silent, VerifyOption vo);

// ---------- Heap counters and metrics
//
private:
           size_t _initial_size;
           size_t _minimum_size;
  volatile size_t _soft_max_size;
  shenandoah_padding(0);
  volatile jlong  _used;
  volatile size_t _committed;
  volatile jlong  _bytes_allocated_since_gc_start;
  shenandoah_padding(1);

public:
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);
  void set_used(size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);
  void increase_allocated(size_t bytes);

  size_t bytes_allocated_since_gc_start();
  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity()      const;
  size_t max_capacity()      const;
  size_t soft_max_capacity() const;
  size_t initial_capacity()  const;
  size_t capacity()          const;
  size_t used()              const;
  size_t committed()         const;

  void set_soft_max_capacity(size_t v);

// ---------- Workers handling
//
private:
  uint _max_workers;

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  ShenandoahWorkGang* workers() const;

  void gc_threads_do(ThreadClosure* tcl) const;

// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool      _heap_region_special;
  size_t    _num_regions;
  ShenandoahHeapRegion** _regions;
  ShenandoahRegionIterator _update_refs_iterator;

public:
  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

// ---------- GC state machinery
//
// GC state describes the important parts of the collector state that may be
// used to make barrier selection decisions in native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when the GC state is zero, the heap is stable, and no
// barriers are required.
//
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: need RB, ACMP, CAS barriers.
    HAS_FORWARDED_BITPOS   = 0,

    // Heap is under marking: needs SATB barriers.
    MARKING_BITPOS    = 1,

    // Heap is under evacuation: needs WB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,

    // Heap is under updating: needs SVRB/SVWB barriers.
    UPDATEREFS_BITPOS = 3,
  };

  enum GCState {
    STABLE        = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    MARKING       = 1 << MARKING_BITPOS,
    EVACUATION    = 1 << EVACUATION_BITPOS,
    UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
  };
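
  // Illustrative sketch only: how barrier code might consult the state byte,
  // matching the invariant above that a zero state needs no barrier work.
  // resolve_forwarded() stands in for the forwarding-pointer load done by the
  // actual barrier implementation.
  //
  //   char state = ShenandoahHeap::heap()->gc_state();
  //   if (state == STABLE) {
  //     return obj;                            // heap is stable: no barriers required
  //   }
  //   if (state & HAS_FORWARDED) {
  //     obj = resolve_forwarded(obj);          // RB / ACMP / CAS fixups
  //   }
  //   if (state & MARKING) {
  //     // SATB barrier: enqueue previous values on stores (see requires_marking())
  //   }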

private:
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag   _degenerated_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_move_in_progress;
  ShenandoahSharedFlag   _progress_last_gc;

  void set_gc_state_mask(uint mask, bool value);

public:
  char gc_state();
  static address gc_state_addr();

  void set_concurrent_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);

  inline bool is_stable() const;
  inline bool is_idle() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool has_forwarded_objects() const;
  inline bool is_gc_in_progress_mask(uint mask) const;

// ---------- GC cancellation and degeneration machinery
//
// Cancelled GC flag is used to notify concurrent phases that they should terminate.
//
public:
  enum ShenandoahDegenPoint {
    _degenerated_unset,
    _degenerated_outside_cycle,
    _degenerated_mark,
    _degenerated_evac,
    _degenerated_updaterefs,
    _DEGENERATED_LIMIT
  };

  static const char* degen_point_to_string(ShenandoahDegenPoint point) {
    switch (point) {
      case _degenerated_unset:
        return "<UNSET>";
      case _degenerated_outside_cycle:
        return "Outside of Cycle";
      case _degenerated_mark:
        return "Mark";
      case _degenerated_evac:
        return "Evacuation";
      case _degenerated_updaterefs:
        return "Update Refs";
      default:
        ShouldNotReachHere();
        return "ERROR";
    }
  }

private:
  ShenandoahSharedFlag _cancelled_gc;
  inline bool try_cancel_gc();

public:
  static address cancelled_gc_addr();

  inline bool cancelled_gc() const;

  inline void clear_cancelled_gc();

  void cancel_gc(GCCause::Cause cause);

// ---------- GC operations entry points
//
public:
  // Entry points to STW GC operations; these cause a related safepoint that then
  // calls the entry method below.
  void vmop_entry_init_mark();
  void vmop_entry_final_mark();
  void vmop_entry_init_updaterefs();
  void vmop_entry_final_updaterefs();
  void vmop_entry_full(GCCause::Cause cause);
  void vmop_degenerated(ShenandoahDegenPoint point);

  // Entry methods for the normally STW GC operations. These set up logging,
  // monitoring and workers for the net VM operation.
  void entry_init_mark();
  void entry_final_mark();
  void entry_init_updaterefs();
  void entry_final_updaterefs();
  void entry_full(GCCause::Cause cause);
  void entry_degenerated(int point);

  // Entry methods for the normally concurrent GC operations. These set up logging
  // and monitoring for the concurrent operation.
  void entry_reset();
  void entry_mark();
  void entry_preclean();
  void entry_cleanup_early();
  void entry_evac();
  void entry_updaterefs();
  void entry_cleanup_complete();
  void entry_uncommit(double shrink_before, size_t shrink_until);
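
  // Illustrative sketch only: ShenandoahControlThread strings these entry points
  // together for a regular concurrent cycle, roughly in this order (degenerated
  // and full GC take over when the concurrent cycle cannot make progress):
  //
  //   entry_reset();                    //      concurrent: reset marking state
  //   vmop_entry_init_mark();           // STW: init mark
  //   entry_mark();                     //      concurrent mark
  //   entry_preclean();                 //      concurrent precleaning
  //   vmop_entry_final_mark();          // STW: final mark, prepare evacuation
  //   entry_cleanup_early();            //      concurrent cleanup
  //   entry_evac();                     //      concurrent evacuation
  //   vmop_entry_init_updaterefs();     // STW: init update-refs
  //   entry_updaterefs();               //      concurrent update-refs
  //   vmop_entry_final_updaterefs();    // STW: final update-refs
  //   entry_cleanup_complete();         //      concurrent cleanup of collection set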

private:
  // Actual work for the phases
  void op_init_mark();
  void op_final_mark();
  void op_init_updaterefs();
  void op_final_updaterefs();
  void op_full(GCCause::Cause cause);
  void op_degenerated(ShenandoahDegenPoint point);
  void op_degenerated_fail();
  void op_degenerated_futile();

  void op_reset();
  void op_mark();
  void op_preclean();
  void op_cleanup_early();
  void op_conc_evac();
  void op_stw_evac();
  void op_updaterefs();
  void op_cleanup_complete();
  void op_uncommit(double shrink_before, size_t shrink_until);

  // Messages for GC trace events; they have to be immortal so they can be
  // passed around the logging/tracing systems.
  const char* init_mark_event_message() const;
  const char* final_mark_event_message() const;
  const char* conc_mark_event_message() const;
  const char* degen_event_message(ShenandoahDegenPoint point) const;

// ---------- GC subsystems
//
private:
  ShenandoahControlThread*   _control_thread;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahMode*            _gc_mode;
  ShenandoahHeuristics*      _heuristics;
  ShenandoahFreeSet*         _free_set;
  ShenandoahConcurrentMark*  _scm;
  ShenandoahMarkCompact*     _full_gc;
  ShenandoahPacer*           _pacer;
  ShenandoahVerifier*        _verifier;

  ShenandoahPhaseTimings*    _phase_timings;

  ShenandoahControlThread*   control_thread()          { return _control_thread;    }
  ShenandoahMarkCompact*     full_gc()                 { return _full_gc;           }

public:
  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahHeuristics*      heuristics()        const { return _heuristics;        }
  ShenandoahFreeSet*         free_set()          const { return _free_set;          }
  ShenandoahConcurrentMark*  concurrent_mark()         { return _scm;               }
  ShenandoahPacer*           pacer()             const { return _pacer;             }

  ShenandoahPhaseTimings*    phase_timings()     const { return _phase_timings;     }

  ShenandoahVerifier*        verifier();

// ---------- VM subsystem bindings
//
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  ConcurrentGCTimer* _gc_timer;

public:
  ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; }

  GCTracer* tracer();
  GCTimer* gc_timer() const;
  CollectorPolicy* collector_policy() const;

// ---------- Reference processing
//
private:
  ReferenceProcessor*  _ref_processor;
  ShenandoahSharedFlag _process_references;

  void ref_processing_init();

public:
  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void set_process_references(bool pr);
  bool process_references() const;

// ---------- Class Unloading
//
private:
  ShenandoahSharedFlag _unload_classes;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Delete entries for dead interned strings and clean up unreferenced symbols
  // in the symbol table, possibly in parallel.
  void unload_classes_and_cleanup_tables(bool full_gc);

// ---------- Generic interface hooks
// Minor things that the super-interface expects us to implement to play nice with
// the rest of the runtime. Some of the things here are not required to be
// implemented, and can be stubbed out.
//
public:
  AdaptiveSizePolicy* size_policy() shenandoah_not_implemented_return(NULL);
  bool is_maximal_no_gc() const shenandoah_not_implemented_return(false);

  bool is_in(const void* p) const;

  // All objects can potentially move
  bool is_scavengable(const void* addr) { return true; }

  void collect(GCCause::Cause cause);
  void do_full_collection(bool clear_all_soft_refs);

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  void space_iterate(SpaceClosure* scl) shenandoah_not_implemented;
  void oop_iterate(ExtendedOopClosure* cl);
  Space* space_containing(const void* oop) const shenandoah_not_implemented_return(NULL);

  // Used by RMI
  jlong millis_since_last_gc();

  bool can_elide_tlab_store_barriers() const                  { return true;    }
  oop new_store_pre_barrier(JavaThread* thread, oop new_obj)  { return new_obj; }
  bool can_elide_initializing_store_barrier(oop new_obj)      { return true;    }
  bool card_mark_must_follow_store() const                    { return false;   }

  bool is_in_partial_collection(const void* p) shenandoah_not_implemented_return(false);
  bool supports_heap_inspection() const { return true; }

  void gc_prologue(bool b);
  void gc_epilogue(bool b);

  void acquire_pending_refs_lock();
  void release_pending_refs_lock();

// ---------- Code roots handling hooks
//
public:
  void register_nmethod(nmethod* nm);
  void unregister_nmethod(nmethod* nm);

// ---------- Pinning hooks
//
public:
  // Shenandoah supports per-object (per-region) pinning
  bool supports_object_pinning() const { return true; }

  oop pin_object(JavaThread* thread, oop obj);
  void unpin_object(JavaThread* thread, oop obj);

  void sync_pinned_region_status();
  void assert_pinned_region_status() NOT_DEBUG_RETURN;
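
  // Illustrative sketch only: pinning brackets a critical section (for example,
  // JNI GetPrimitiveArrayCritical-style access). While pinned, the object's
  // region stays out of the collection set, so the object cannot move.
  //
  //   oop pinned = heap->pin_object(thread, obj);
  //   // ... raw access to the object's payload; it will not be relocated ...
  //   heap->unpin_object(thread, pinned);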

// ---------- Allocation support
//
private:
  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
  HeapWord* mem_allocate(size_t size, bool* what);

  void notify_mutator_alloc_words(size_t words, bool waste);

  // Shenandoah supports TLAB allocation
  bool supports_tlab_allocation() const { return true; }

  HeapWord* allocate_new_tlab(size_t word_size);
  size_t tlab_capacity(Thread *thr) const;
  size_t unsafe_max_tlab_alloc(Thread *thread) const;
  size_t max_tlab_size() const;
  size_t tlab_used(Thread* ignored) const;

  void resize_tlabs();
  void resize_all_tlabs();

  void accumulate_statistics_tlabs();
  void accumulate_statistics_all_gclabs();

  void make_parsable(bool retire_tlabs);
  void ensure_parsability(bool retire_tlabs);
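
  // Illustrative sketch only, assuming the usual LAB pattern: GC workers try the
  // thread-local GCLAB bump-pointer path first, and fall back to the slow path
  // that refills the GCLAB (or allocates a shared copy) through allocate_memory().
  //
  //   HeapWord* copy = allocate_from_gclab(thread, size);    // inline fast path
  //   if (copy == NULL) {
  //     copy = allocate_from_gclab_slow(thread, size);       // refill and retry
  //   }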

// ---------- Marking support
//
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion _bitmap_region;
  MemRegion _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  size_t _pretouch_heap_page_size;
  size_t _pretouch_bitmap_page_size;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  ShenandoahLiveData** _liveness_cache;

public:
  inline ShenandoahMarkingContext* complete_marking_context() const;
  inline ShenandoahMarkingContext* marking_context() const;
  inline void mark_complete_marking_context();
  inline void mark_incomplete_marking_context();

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  void reset_mark_bitmap();

  // SATB barrier hooks
  inline bool requires_marking(const void* entry) const;
  void force_satb_flush_all_threads();

  // Support for bitmap uncommits
  bool commit_bitmap_slice(ShenandoahHeapRegion *r);
  bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // Liveness caching support
  ShenandoahLiveData* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);

  size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }

// ---------- Evacuation support
//
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;

  void evacuate_and_update_roots();

public:
  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  // Checks if object is in the collection set.
  inline bool in_collection_set(oop obj) const;

  // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
  inline bool in_collection_set_loc(void* loc) const;

  // Evacuates object src. Returns the evacuated object, either evacuated
  // by this thread, or by some other thread.
  inline oop  evacuate_object(oop src, Thread* thread);

  // Call before/after evacuation.
  void enter_evacuation();
  void leave_evacuation();
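
  // Illustrative sketch only: the typical pattern for code that may need to
  // evacuate, bracketed by the evacuation OOM protocol handled by
  // ShenandoahEvacOOMHandler:
  //
  //   heap->enter_evacuation();
  //   if (heap->in_collection_set(obj)) {
  //     obj = heap->evacuate_object(obj, Thread::current());  // our copy, or the
  //   }                                                       // winning thread's copy
  //   heap->leave_evacuation();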

// ---------- Helper functions
//
public:
  template <class T>
  inline oop evac_update_with_forwarded(T* p);

  template <class T>
  inline oop maybe_update_with_forwarded(T* p);

  template <class T>
  inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);

  template <class T>
  inline oop update_with_forwarded_not_null(T* p, oop obj);

  static inline oop cas_oop(oop n, narrowOop* addr, oop c);
  static inline oop cas_oop(oop n, oop* addr, oop c);
  static inline oop cas_oop(oop n, narrowOop* addr, narrowOop c);

  void trash_humongous_region_at(ShenandoahHeapRegion *r);

  void complete_marking();
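
  // Illustrative sketch only: an update-refs style oop closure would typically
  // call maybe_update_with_forwarded() on each visited location, which installs
  // the forwardee when the old value still points into the collection set.
  // UpdateRefsClosure and its _heap field are hypothetical here.
  //
  //   template <class T>
  //   inline void UpdateRefsClosure::do_oop_work(T* p) {
  //     _heap->maybe_update_with_forwarded(p);
  //   }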

private:
  void trash_cset_regions();
  void update_heap_references(bool concurrent);

// ---------- Testing helper functions
//
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP