/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBERED_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBERED_HPP

// Terminology used within this source file:
//
// Card Entry:   This is the information that identifies whether a
//               particular card-table entry is Clean or Dirty.  A clean
//               card entry denotes that the associated memory does not
//               hold references to young-gen memory.
//
// Card Region, aka
// Card Memory:  This is the region of memory that is associated with a
//               particular card entry.
//
// Card Cluster: A card cluster represents 64 card entries.  A card
//               cluster is the minimal amount of work performed at a
//               time by a parallel thread.  Note that the work required
//               to scan a card cluster is somewhat variable: the
//               required effort depends on how many cards are dirty, how
//               many references are held within the objects that span a
//               DIRTY card's memory, and on the size of the object
//               that spans the end of a DIRTY card's memory.  (That
//               object, if it is not an array, may need to be scanned in
//               its entirety when it is imprecisely dirtied.  Imprecise
//               dirtying means that the card corresponding to the object header
//               is dirtied, rather than the card on which the updated field lives.)
//               To better balance work amongst them, parallel worker threads dynamically
//               claim clusters and are flexible in the number of clusters they
//               process.
//
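// As an illustrative sketch (hypothetical helper arithmetic, not an API
// defined in this file; `addr` and `heap_base` are assumed names), the
// cluster number for a heap address under a 512 B card,
// 64-cards-per-cluster configuration could be computed as:
//
//   size_t card_index = (addr - heap_base) >> 9;  // CardTable::card_shift == 9
//   size_t cluster_no = card_index / 64;          // CardsPerCluster == 64
//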
// A cluster represents a "natural" quantum of work to be performed by
// a parallel GC thread's background remembered set scanning efforts.
// The notion of cluster is similar to the notion of stripe in the
// implementation of parallel GC card scanning.  However, a cluster is
// typically smaller than a stripe, enabling finer-grained division of
// labor between multiple threads, and potentially better load balancing
// when dirty cards are not uniformly distributed in the heap, as is often
// the case with generational workloads where more recently promoted objects
// may be dirtied more frequently than older objects.
//
// For illustration, consider the following possible JVM configurations:
//
//   Scenario 1:
//     RegionSize is 128 MB
//     Span of a card entry is 512 B
//     Each card table entry consumes 1 B
//     Assume one long word (8 B) of the card table represents a cluster.
//       This long word holds 8 card table entries, spanning a
//       total of 8 * 512 B = 4 KB of the heap
//     The number of clusters per region is 128 MB / 4 KB = 32 K
//
//   Scenario 2:
//     RegionSize is 128 MB
//     Span of each card entry is 128 B
//     Each card table entry consumes 1 bit
//     Assume one int word (4 B) of the card table represents a cluster.
//       This int word holds 32 b / 1 b = 32 card table entries, spanning a
//       total of 32 * 128 B = 4 KB of the heap
//     The number of clusters per region is 128 MB / 4 KB = 32 K
//
//   Scenario 3:
//     RegionSize is 128 MB
//     Span of each card entry is 512 B
//     Each card table entry consumes 1 bit
//     Assume one long word (8 B) of the card table represents a cluster.
//       This long word holds 64 b / 1 b = 64 card table entries, spanning a
//       total of 64 * 512 B = 32 KB of the heap
//     The number of clusters per region is 128 MB / 32 KB = 4 K
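//
//   In each scenario, the same general relation holds:
//
//     clusters_per_region = region_size / (card_span * cards_per_cluster)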
//
// At the start of a new young-gen concurrent mark pass, the gang of
// Shenandoah worker threads collaborate in performing the following
// actions:
//
//  Let old_regions = number of ShenandoahHeapRegions comprising
//    old-gen memory
//  Let region_size = ShenandoahHeapRegion::region_size_bytes()
//    represent the number of bytes in each region
//  Let clusters_per_region = region_size / cluster_span
//    (e.g. 128 MB / 32 KB with 512 B cards and 64 cards per cluster)
//  Let rs represent the relevant RememberedSet implementation
//    (an instance of ShenandoahDirectCardMarkRememberedSet or an instance
//     of a to-be-implemented ShenandoahBufferWithSATBRememberedSet)
//
//  for each ShenandoahHeapRegion old_region in the whole heap
//    determine the cluster number of the first cluster belonging
//      to that region
//    for each cluster contained within that region
//      Assure that exactly one worker thread processes each
//      cluster, each thread making a series of invocations of the
//      following:
//
//        rs->process_clusters(worker_id, ReferenceProcessor *,
//                             ShenandoahConcurrentMark *, cluster_no, cluster_count,
//                             HeapWord *end_of_range, OopClosure *oops);
//
//  For efficiency, divide up the clusters so that different threads
//  are responsible for processing different clusters.  Processing costs
//  may vary greatly between clusters for the following reasons:
//
//        a) some clusters contain mostly dirty cards and other
//           clusters contain mostly clean cards
//        b) some clusters contain mostly primitive data and other
//           clusters contain mostly reference data
//        c) some clusters are spanned by very large non-array objects that
//           begin in some other cluster.  When a large non-array object
//           beginning in a preceding cluster spans large portions of
//           this cluster, then because of imprecise dirtying, the
//           portion of the object in this cluster may be clean, but
//           will need to be processed by the worker responsible for
//           this cluster, potentially increasing its work.
//        d) in the case that the end of this cluster is spanned by a
//           very large non-array object, the worker for this cluster will
//           be responsible for processing the portion of the object
//           in this cluster.
//
// Though an initial division of labor between marking threads may
// assign equal numbers of clusters to be scanned by each thread, it
// should be expected that some threads will finish their assigned
// work before others.  Therefore, some amount of the full remembered
// set scanning effort should be held back and assigned incrementally
// to the threads that end up with excess capacity.  Consider the
// following strategy for dividing labor:
//
//        1. Assume there are 8 marking threads and 1024 remembered
//           set clusters to be scanned.
//        2. Assign each thread to scan 64 clusters.  This leaves
//           512 (1024 - (8 * 64)) clusters to still be scanned.
//        3. As the 8 worker threads complete previous cluster
//           scanning assignments, issue each of the next 8 scanning
//           assignments as units of 32 additional clusters each.
//           In the case that there is high variance in effort
//           associated with previous cluster scanning assignments,
//           multiples of these next assignments may be serviced by
//           the worker threads that were previously assigned lighter
//           workloads.
//        4. Make subsequent scanning assignments as follows:
//             a) 8 assignments of size 16 clusters
//             b) 8 assignments of size 8 clusters
//             c) 16 assignments of size 4 clusters
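//
//           These assignments sum to the full workload:
//             8*64 + 8*32 + 8*16 + 8*8 + 16*4
//               = 512 + 256 + 128 + 64 + 64 = 1024 clusters.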
//
//    When there is no more remembered set processing work to be
//    assigned to a newly idled worker thread, that thread can move
//    on to work on other tasks associated with root scanning until such
//    time as all clusters have been examined.
//
// Remembered set scanning is designed to run concurrently with
// mutator threads, using multiple concurrent workers.  Note, however,
// that the current implementation never clears a card once it has
// been marked; this limitation will be addressed by future
// enhancements to the existing implementation.

#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/shenandoahCardStats.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahNumberSeq.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "memory/iterator.hpp"
#include "utilities/globalDefinitions.hpp"

class ShenandoahReferenceProcessor;
class ShenandoahConcurrentMark;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahRegionIterator;
class ShenandoahMarkingContext;

class CardTable;
typedef CardTable::CardValue CardValue;

class ShenandoahDirectCardMarkRememberedSet: public CHeapObj<mtGC> {

private:

  // Use symbolic constants defined in cardTable.hpp
  //  CardTable::card_shift = 9;
  //  CardTable::card_size = 512;
  //  CardTable::card_size_in_words = 64;
  //  CardTable::clean_card_val()
  //  CardTable::dirty_card_val()

  const size_t LogCardValsPerIntPtr;    // log2 of the number of card values (entries) in an intptr_t
  const size_t LogCardSizeInWords;      // log2 of the size of a card in heap word units

  ShenandoahHeap *_heap;
  ShenandoahCardTable *_card_table;
  size_t _card_shift;
  size_t _total_card_count;
  size_t _cluster_count;
  HeapWord *_whole_heap_base;   // Points to first HeapWord of data contained within heap memory
  CardValue* _byte_map;         // Points to first entry within the card table
  CardValue* _byte_map_base;    // Points to byte_map minus the bias computed from address of heap memory

public:

  // total_card_count is the number of cards represented by the card table.
  ShenandoahDirectCardMarkRememberedSet(ShenandoahCardTable *card_table, size_t total_card_count);

  // Card index is zero-based relative to _byte_map.
  size_t last_valid_index() const;
  size_t total_cards() const;
  size_t card_index_for_addr(HeapWord *p) const;
  HeapWord *addr_for_card_index(size_t card_index) const;
  inline const CardValue* get_card_table_byte_map(bool write_table) const;
  inline bool is_card_dirty(size_t card_index) const;
  inline bool is_write_card_dirty(size_t card_index) const;
  inline void mark_card_as_dirty(size_t card_index);
  inline void mark_range_as_dirty(size_t card_index, size_t num_cards);
  inline void mark_card_as_clean(size_t card_index);
  inline void mark_range_as_clean(size_t card_index, size_t num_cards);
  inline bool is_card_dirty(HeapWord *p) const;
  inline void mark_card_as_dirty(HeapWord *p);
  inline void mark_range_as_dirty(HeapWord *p, size_t num_heap_words);
  inline void mark_card_as_clean(HeapWord *p);
  inline void mark_range_as_clean(HeapWord *p, size_t num_heap_words);
  inline size_t cluster_count() const;

  // Called by GC thread at start of concurrent mark to exchange roles of read and write remembered sets.
  // Not currently used because the mutator write barrier does not honor changes to the location of the card table.
  // Instead of swap_remset, the current implementation of concurrent remembered set scanning does reset_remset
  // in parallel threads, each invocation processing one entire HeapRegion at a time.
  void swap_remset() {  _card_table->swap_card_tables(); }

  // Merge any dirty values from the write table into the read table, while leaving
  // the write table unchanged.
  void merge_write_table(HeapWord* start, size_t word_count);

  // Destructively copy the write table to the read table, and clean the write table.
  void reset_remset(HeapWord* start, size_t word_count);

  // Called by GC thread after scanning old remembered set in order to prepare for next GC pass
  void clear_old_remset() {  _card_table->clear_read_table(); }
};
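
// A minimal sketch (not part of the class above) of how the address/index
// translations can be implemented for a direct-card-mark remembered set,
// assuming a byte-per-card map and the constants noted above:
//
//   size_t card_index_for_addr(HeapWord* p) const {
//     return pointer_delta(p, _whole_heap_base) >> LogCardSizeInWords;
//   }
//   HeapWord* addr_for_card_index(size_t card_index) const {
//     return _whole_heap_base + (card_index << LogCardSizeInWords);
//   }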

// A ShenandoahCardCluster represents the minimal unit of work
// performed by independent parallel GC threads during scanning of
// remembered sets.
//
// The GC threads that perform card-table remembered set scanning may
// overwrite card-table entries to mark them as clean in the case that
// the associated memory no longer holds references to young-gen
// memory.  Rather than access the card-table entries directly, all GC
// thread access to card-table information is made by way of the
// ShenandoahCardCluster data abstraction.  This abstraction
// effectively manages access to multiple possible underlying
// remembered set implementations, including a traditional card-table
// approach and a SATB-based approach.
//
// The API services represent a compromise between efficiency and
// convenience.
//
// Multiple GC threads scan the remembered set in parallel.  The
// desire is to divide the complete scanning effort into multiple
// clusters of work that can be independently processed by individual
// threads without need for synchronizing efforts between the work
// performed by each task.  The term "cluster" of work is similar to
// the term "stripe" as used in the implementation of Parallel GC.
//
// Complexity arises when an object to be scanned crosses the boundary
// between adjacent cluster regions.  Here is the protocol that we currently
// follow:
//
//  1. The thread responsible for scanning the cards in a cluster modifies
//     the associated card-table entries.  Only cards that are dirty are
//     processed, except as described below for the case of objects that
//     straddle more than one card.
//  2. Object arrays are precisely dirtied, so only the portion of the obj-array
//     that overlaps the range of dirty cards in its cluster is scanned
//     by each worker thread.  This holds for portions of obj-arrays that extend
//     over clusters processed by different workers, with each worker responsible
//     for scanning the portion of the obj-array overlapping the dirty cards in
//     its cluster.
//  3. Non-array objects are precisely dirtied by the interpreter and the compilers.
//     For such objects that extend over multiple cards, or even multiple clusters,
//     the entire object is scanned by the worker that processes the (dirty) card on
//     which the object's header lies.  (However, GC workers should precisely dirty the
//     cards with inter-regional/inter-generational pointers in the body of this object,
//     thus making subsequent scans potentially less expensive.)  Such larger non-array
//     objects are relatively rare.
//
//  A possible criticism:
//  C. The representation of pointer location descriptive information
//     within Klass representations is not designed for efficient
//     "random access".  An alternative approach to this design would
//     be to scan very large objects multiple times, once for each
//     cluster that is spanned by the object's range.  This reduces
//     unnecessary overscan, but it introduces different sorts of
//     overhead effort:
//       i) For each spanned cluster, we have to look up the start of
//          the crossing object.
//      ii) Each time we scan the very large object, we have to
//          sequentially walk through its pointer location
//          descriptors, skipping over all of the pointers that
//          precede the start of the range of addresses that we
//          consider relevant.

// Because old-gen heap memory is not necessarily contiguous, and
// because cards are not necessarily maintained for young-gen memory,
// consecutive card numbers do not necessarily correspond to consecutive
// address ranges.  For the traditional direct-card-marking
// implementation of this interface, consecutive card numbers are
// likely to correspond to contiguous regions of memory, but this
// should not be assumed.  Instead, rely only upon the following:
//
//  1. All card numbers for cards pertaining to the same
//     ShenandoahHeapRegion are consecutively numbered.
//  2. In the case that neighboring ShenandoahHeapRegions both
//     represent old-gen memory, the card regions that span the
//     boundary between these neighboring heap regions will be
//     consecutively numbered.
//  3. (A corollary) In the case that an old-gen object straddles the
//     boundary between two heap regions, the card regions that
//     correspond to the span of this object will be consecutively
//     numbered.
//
// ShenandoahCardCluster abstracts access to the remembered set
// and also keeps track of crossing map information to allow efficient
// resolution of object start addresses.
//
// ShenandoahCardCluster supports all of the services of
// RememberedSet, plus it supports register_object() and lookup_object().
// Note that we only need to register the start addresses of objects
// relative to the cards they overlap: register_object() checks whether
// the object crosses a card boundary and, if so, updates the offset
// values for each card that the object crosses into.  For objects that
// don't straddle cards, nothing needs to be done.
//
// The RememberedSet template parameter is intended to represent either
//     ShenandoahDirectCardMarkRememberedSet, or a to-be-implemented
//     ShenandoahBufferWithSATBRememberedSet.
template<typename RememberedSet>
class ShenandoahCardCluster: public CHeapObj<mtGC> {

private:
  RememberedSet *_rs;

public:
  static const size_t CardsPerCluster = 64;

private:
  typedef struct cross_map { uint8_t first; uint8_t last; } xmap;
  typedef union crossing_info { uint16_t short_word; xmap offsets; } crossing_info;

  // The ObjectStartsInCardRegion bit is set within crossing_info.offsets.first iff at least one object starts within
  // a particular card region.  We pack this bit into the first byte under the assumption that the first byte is accessed
  // less frequently than the last byte.  This is true when the number of clean cards is greater than the number of dirty cards.
  static const uint16_t ObjectStartsInCardRegion = 0x80;
  static const uint16_t FirstStartBits           = 0x7f;

  // Check that we have enough bits to store the largest possible offset into a card for an object start.
  // The value for maximum card size is based on the constraints for GCCardSizeInBytes in gc_globals.hpp.
  static const int MaxCardSize = NOT_LP64(512) LP64_ONLY(1024);
  STATIC_ASSERT((MaxCardSize / HeapWordSize) - 1 <= FirstStartBits);
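
  // For example (illustrative values): a card whose first object starts at
  // word offset 3 and whose last object starts at word offset 60 would be
  // encoded as offsets.first = 0x80 | 3 = 0x83 and offsets.last = 60; a card
  // within which no object starts has the ObjectStartsInCardRegion bit clear.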

  crossing_info *object_starts;

public:
  // If we're setting first_start, assume the card has an object.
  inline void set_first_start(size_t card_index, uint8_t value) {
    object_starts[card_index].offsets.first = ObjectStartsInCardRegion | value;
  }

  inline void set_last_start(size_t card_index, uint8_t value) {
    object_starts[card_index].offsets.last = value;
  }

  inline void set_starts_object_bit(size_t card_index) {
    object_starts[card_index].offsets.first |= ObjectStartsInCardRegion;
  }

  inline void clear_starts_object_bit(size_t card_index) {
    object_starts[card_index].offsets.first &= ~ObjectStartsInCardRegion;
  }

  // Returns true iff an object is known to start within the card memory associated with card card_index.
  inline bool starts_object(size_t card_index) const {
    return (object_starts[card_index].offsets.first & ObjectStartsInCardRegion) != 0;
  }

  inline void clear_objects_in_range(HeapWord *addr, size_t num_words) {
    size_t card_index = _rs->card_index_for_addr(addr);
    size_t last_card_index = _rs->card_index_for_addr(addr + num_words - 1);
    while (card_index <= last_card_index) {
      object_starts[card_index++].short_word = 0;
    }
  }

  ShenandoahCardCluster(RememberedSet *rs) {
    _rs = rs;
    // TODO: We don't really need object_starts entries for every card entry.  We only need these for
    // the card entries that correspond to old-gen memory.  But for now, let's be quick and dirty.
    object_starts = NEW_C_HEAP_ARRAY(crossing_info, rs->total_cards(), mtGC);
    for (size_t i = 0; i < rs->total_cards(); i++) {
      object_starts[i].short_word = 0;
    }
  }

  ~ShenandoahCardCluster() {
    FREE_C_HEAP_ARRAY(crossing_info, object_starts);
    object_starts = nullptr;
  }

  // There is one entry within the object_starts array for each card entry.
  //
  //  Suppose multiple garbage objects are coalesced during GC sweep
  //  into a single larger "free segment".  As each two objects are
  //  coalesced together, the start information pertaining to the second
  //  object must be removed from the object_starts array.  If the
  //  second object had been the first object within card memory,
  //  the new first object is the object that follows it if that
  //  object starts within the same card memory, or NoObject if the
  //  following object starts within the following cluster.  If the
  //  second object had been the last object in the card memory,
  //  replace this entry with the newly coalesced object if it starts
  //  within the same card memory, or with NoObject if it starts in a
  //  preceding card's memory.
  //
  //  Suppose a large free segment is divided into a smaller free
  //  segment and a new object.  The second part of the newly divided
  //  memory must be registered as a new object, overwriting at most
  //  one first_start and one last_start entry.  Note that one of the
  //  newly divided two objects might be a new GCLAB.
  //
  //  Suppose postprocessing of a GCLAB finds that the original GCLAB
  //  has been divided into N objects.  Each of the N newly allocated
  //  objects will be registered, overwriting at most one first_start
  //  and one last_start entry.
  //
  //  No object registration operations are linear in the length of
  //  the registered objects.
  //
  // Consider further the following observations regarding object
  // registration costs:
  //
  //   1. The cost is paid once for each old-gen object (except when
  //      an object is demoted and repromoted, in which case we would
  //      pay the cost again).
  //   2. The cost can be deferred so that there is no urgency during
  //      mutator copy-on-first-access promotion.  Background GC
  //      threads will update the object_starts array by post-
  //      processing the contents of retired PLAB buffers.
  //   3. The bet is that these costs are paid relatively rarely
  //      because:
  //      a) Most objects die young and objects that die in young-gen
  //         memory never need to be registered with the object_starts
  //         array.
  //      b) Most objects that are promoted into old-gen memory live
  //         there without further relocation for a relatively long
  //         time, so we get a lot of benefit from each investment
  //         in registering an object.

public:

  // The starting locations of objects contained within old-gen memory
  // are registered as part of the remembered set implementation.  This
  // information is required when scanning dirty card regions that are
  // spanned by objects beginning within preceding card regions.  It
  // is necessary to find the first and last objects that begin within
  // this card region.  Starting addresses of objects are required to
  // find the object headers, and object headers provide information
  // about which fields within the object hold addresses.
  //
  // The old-gen memory allocator invokes register_object() for any
  // object that is allocated within old-gen memory.  This identifies
  // the starting addresses of objects that span boundaries between
  // card regions.
  //
  // It is not necessary to invoke register_object at the very instant
  // an object is allocated.  It is only necessary to invoke it
  // prior to the next start of a garbage collection concurrent mark
  // or concurrent update-references phase.  An "ideal" time to register
  // objects is during post-processing of a GCLAB after the GCLAB is
  // retired due to depletion of its memory.
  //
  // register_object() does not perform synchronization.  In the case
  // that multiple threads are registering objects whose starting
  // addresses are within the same cluster, races between these
  // threads may result in corruption of the object-start data
  // structures.  Parallel GC threads should avoid registering objects
  // residing within the same cluster by adhering to the following
  // coordination protocols:
  //
  //  1. Align thread-local GCLAB buffers with some TBD multiple of
  //     card clusters.  The card cluster size is 32 KB.  If the
  //     desired GCLAB size is 128 KB, align the buffer on a multiple
  //     of 4 card clusters.
  //  2. Post-process the contents of GCLAB buffers to register the
  //     objects allocated therein.  Allow one GC thread at a
  //     time to do the post-processing of each GCLAB.
  //  3. Since only one GC thread at a time is registering objects
  //     belonging to a particular allocation buffer, no locking
  //     is performed when registering these objects.
  //  4. Any remnant of unallocated memory within an expended GC
  //     allocation buffer is not returned to the old-gen allocation
  //     pool until after the GC allocation buffer has been post
  //     processed.  Before any remnant memory is returned to the
  //     old-gen allocation pool, the GC thread that scanned this GC
  //     allocation buffer performs a write-commit memory barrier.
  //  5. Background GC threads that perform tenuring of young-gen
  //     objects without a GCLAB use a CAS lock before registering
  //     each tenured object (see the sketch following this list).
  //     The CAS lock assures both mutual
  //     exclusion and memory coherency/visibility.  Note that an
  //     object tenured by a background GC thread will not overlap
  //     with any of the clusters that are receiving tenured objects
  //     by way of GCLAB buffers.  Multiple independent GC threads may
  //     attempt to tenure objects into a shared cluster.  This is why
  //     synchronization may be necessary.  Consider the following
  //     scenarios:
  //
  //     a) If two objects are tenured into the same card region, each
  //        registration may attempt to modify the first-start or
  //        last-start information associated with that card region.
  //        Furthermore, because the representations of first-start
  //        and last-start information within the object_starts array
  //        entry use different bits of a shared uint16_t to represent
  //        each, it is necessary to lock the entire card entry
  //        before modifying either the first-start or last-start
  //        information within the entry.
  //     b) Suppose GC thread X promotes a tenured object into
  //        card region A and this tenured object spans into
  //        neighboring card region B.  Suppose GC thread Y (not equal
  //        to X) promotes a tenured object into card region B.  GC thread X
  //        will update the object_starts information for card A.  No
  //        synchronization is required.
  //     c) In summary, when background GC threads register objects
  //        newly tenured into old-gen memory, they must acquire a
  //        mutual exclusion lock on the card that holds the starting
  //        address of the newly tenured object.  This can be achieved
  //        by using a CAS instruction to assure that the previous
  //        values of first-offset and last-offset have not been
  //        changed since the same thread inquired as to their most
  //        current values.
  //
  //     One way to minimize the need for synchronization between
  //     background tenuring GC threads is for each tenuring GC thread
  //     to promote young-gen objects into distinct dedicated cluster
  //     ranges.
  //  6. The object_starts information is only required during the
  //     starting of concurrent marking and concurrent evacuation
  //     phases of GC.  Before we start either of these GC phases, the
  //     JVM enters a safe point and all GC threads perform
  //     commit-write barriers to assure that access to the
  //     object_starts information is coherent.
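
  // A minimal sketch of the CAS protocol described in item 5 above.  The
  // helpers cas_register() and merge() are hypothetical (neither is declared
  // by this class); the sketch assumes the crossing_info encoding defined
  // earlier and HotSpot's Atomic API:
  //
  //   void cas_register(size_t card_index, crossing_info desired) {
  //     for (;;) {
  //       uint16_t old_val = Atomic::load(&object_starts[card_index].short_word);
  //       uint16_t new_val = merge(old_val, desired.short_word);  // recompute first/last starts
  //       if (Atomic::cmpxchg(&object_starts[card_index].short_word, old_val, new_val) == old_val) {
  //         return;  // success: the entire entry was updated atomically
  //       }
  //       // else another thread raced with us; retry against the fresh value
  //     }
  //   }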


  // Notes on synchronization of register_object():
  //
  //  1. For efficiency, there is no locking in the implementation of register_object().
  //  2. Thus, it is required that users of this service assure that concurrent/parallel invocations of
  //     register_object() do not pertain to the same card's memory range.  See the discussion below to understand
  //     the risks.
  //  3. When allocating from a TLAB or GCLAB, the mutual exclusion can be guaranteed by assuring that each
  //     LAB's start and end are aligned on card memory boundaries.
  //  4. Use the same lock that guarantees exclusivity when performing free-list allocation within heap regions.
  //
  // Register the newly allocated object while we're holding the global lock since there's no synchronization
  // built in to the implementation of register_object().  There are potential races when multiple independent
  // threads are allocating objects, some of which might span the same card region.  For example, consider
  // a card table's memory region within which three objects are being allocated by three different threads:
  //
  // objects being "concurrently" allocated:
  //    [-----a------][-----b-----][--------------c------------------]
  //            [---- card table memory range --------------]
  //
  // Before any objects are allocated, this card's memory range holds no objects.  Note that:
  //   allocation of object a wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
  //   allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
  //   allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this card region.
  //
  // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as last-start
  // representing object b while first-start represents object c.  This is why we need to require all register_object()
  // invocations associated with objects that are allocated from "free lists" to provide their own mutual exclusion locking
  // mechanism.

  // Reset the starts_object() information to false for all cards in the range between from and to.
  void reset_object_range(HeapWord *from, HeapWord *to);

  // register_object() requires that the caller hold the heap lock
  // before calling it.
  void register_object(HeapWord* address);

  // register_object_without_lock() does not require that the caller hold
  // the heap lock before calling it, under the assumption that the
  // caller has assured that no other thread will endeavor to concurrently
  // register objects that start within the same card's memory region
  // as address.
  void register_object_without_lock(HeapWord* address);

  // During the reference updates phase of GC, we walk through each old-gen memory region that was
  // not part of the collection set and we invalidate all unmarked objects.  As part of this effort,
  // we coalesce neighboring dead objects in order to make future remembered set scanning more
  // efficient (since future remembered set scanning of any card region containing consecutive
  // dead objects can skip over all of them at once by reading only a single dead object header
  // instead of having to read the header of each of the coalesced dead objects).
  //
  // At some future time, we may implement a further optimization: satisfy future allocation requests
  // by carving new objects out of the range of memory that represents the coalesced dead objects.
  //
  // Suppose we want to combine several dead objects into a single coalesced object.  How does this
  // impact our representation of crossing map information?
  //  1. If the newly coalesced range is contained entirely within a card range, that card's last
  //     start entry either remains the same or it is changed to the start of the coalesced region.
  //  2. For the card that holds the start of the coalesced object, it will not impact the first start
  //     but it may impact the last start.
  //  3. For following cards spanned entirely by the newly coalesced object, it will change starts_object
  //     to false (and make first-start and last-start "undefined").
  //  4. For a following card that is spanned partially by the newly coalesced object, it may change
  //     the first-start value, but it will not change the last-start value.
  //
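  // For example, suppose a coalesced object starts within card N, spans all
  // of card N+1, and ends partway into card N+2.  By rule 2, card N keeps its
  // first-start, though its last-start may change.  By rule 3, card N+1 no
  // longer satisfies starts_object().  By rule 4, card N+2's first-start may
  // move to a later object, while its last-start is unchanged.
  //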
  // The range of addresses represented by the arguments to coalesce_objects() must represent a range
  // of memory that was previously occupied exactly by one or more previously registered objects.  For
  // convenience, it is legal to invoke coalesce_objects() with arguments that span a single previously
  // registered object.
  //
  // The role of coalesce_objects is to change the crossing map information associated with all of the coalesced
  // objects.
  void coalesce_objects(HeapWord* address, size_t length_in_words);

  // The typical use case is going to look something like this:
  //   for each heap region that comprises old-gen memory
  //     for each card number that corresponds to this heap region
  //       scan the objects contained therein if the card is dirty
  // To avoid excessive lookups in a sparse array, the API queries
  // the card number pertaining to a particular address and then uses the
  // card number for subsequent information lookups and stores.

  // If starts_object(card_index), this returns the word offset within this card
  // memory at which the first object begins.  If !starts_object(card_index), the
  // result is a don't-care value -- asserts in a debug build.
  size_t get_first_start(size_t card_index) const;

  // If starts_object(card_index), this returns the word offset within this card
  // memory at which the last object begins.  If !starts_object(card_index), the
  // result is a don't-care value.
  size_t get_last_start(size_t card_index) const;


  // Given a card_index, return the starting address of the first block in the heap
  // that straddles into the card.  If the card is co-initial with an object, then
  // this would return the starting address of the portion of the heap that this
  // card covers.  Expects to be called for a card affiliated with the old
  // generation in generational mode.
  HeapWord* block_start(size_t card_index) const;
};

// ShenandoahScanRemembered is a concrete class representing the
// ability to scan the old-gen remembered set for references to
// objects residing in young-gen memory.
//
// Scanning normally begins with an invocation of numRegions and ends
// after all clusters of all regions have been scanned.
//
// Throughout the scanning effort, the number of regions does not
// change.
//
// Even though the regions that comprise old-gen memory are not
// necessarily contiguous, the abstraction represented by this class
// identifies each of the old-gen regions with an integer value
// in the range from 0 to (numRegions() - 1) inclusive.
//

template<typename RememberedSet>
class ShenandoahScanRemembered: public CHeapObj<mtGC> {

private:
  RememberedSet* _rs;
  ShenandoahCardCluster<RememberedSet>* _scc;

  // Global card stats (cumulative)
  HdrSeq _card_stats_scan_rs[MAX_CARD_STAT_TYPE];
  HdrSeq _card_stats_update_refs[MAX_CARD_STAT_TYPE];
  // Per worker card stats (multiplexed by phase)
  HdrSeq** _card_stats;

  // The types of card metrics that we gather
  const char* _card_stats_name[MAX_CARD_STAT_TYPE] = {
   "dirty_run", "clean_run",
   "dirty_cards", "clean_cards",
   "max_dirty_run", "max_clean_run",
   "dirty_scan_objs",
   "alternations"
  };

  // The statistics are collected and logged separately for
  // card-scans for initial marking, and for updating refs.
  const char* _card_stat_log_type[MAX_CARD_STAT_LOG_TYPE] = {
   "Scan Remembered Set", "Update Refs"
  };

  int _card_stats_log_counter[2] = {0, 0};

public:
  // How to instantiate this object?
  //   ShenandoahDirectCardMarkRememberedSet *rs =
  //       new ShenandoahDirectCardMarkRememberedSet();
  //   scr = new
  //     ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet>(rs);
  //
  // or, after the planned implementation of
  // ShenandoahBufferWithSATBRememberedSet has been completed:
  //
  //   ShenandoahBufferWithSATBRememberedSet *rs =
  //       new ShenandoahBufferWithSATBRememberedSet();
  //   scr = new
  //     ShenandoahScanRemembered<ShenandoahBufferWithSATBRememberedSet>(rs);


  ShenandoahScanRemembered(RememberedSet *rs) {
    _rs = rs;
    _scc = new ShenandoahCardCluster<RememberedSet>(rs);

    // We allocate ParallelGCThreads worth even though we usually only
    // use up to ConcGCThreads, because degenerate collections may employ
    // ParallelGCThreads for remembered set scanning.
    if (ShenandoahEnableCardStats) {
      _card_stats = NEW_C_HEAP_ARRAY(HdrSeq*, ParallelGCThreads, mtGC);
      for (uint i = 0; i < ParallelGCThreads; i++) {
        _card_stats[i] = new HdrSeq[MAX_CARD_STAT_TYPE];
      }
    } else {
      _card_stats = nullptr;
    }
  }

  ~ShenandoahScanRemembered() {
    delete _scc;
    if (ShenandoahEnableCardStats) {
      for (uint i = 0; i < ParallelGCThreads; i++) {
        delete [] _card_stats[i];   // allocated with new HdrSeq[], so use array delete
      }
      FREE_C_HEAP_ARRAY(HdrSeq*, _card_stats);
      _card_stats = nullptr;
    }
    assert(_card_stats == nullptr, "Error");
  }

  HdrSeq* card_stats(uint worker_id) {
    assert(worker_id < ParallelGCThreads, "Error");
    assert(ShenandoahEnableCardStats == (_card_stats != nullptr), "Error");
    return ShenandoahEnableCardStats ? _card_stats[worker_id] : nullptr;
  }

  HdrSeq* card_stats_for_phase(CardStatLogType t) {
    switch (t) {
      case CARD_STAT_SCAN_RS:
        return _card_stats_scan_rs;
      case CARD_STAT_UPDATE_REFS:
        return _card_stats_update_refs;
      default:
        guarantee(false, "No such CardStatLogType");
    }
    return nullptr; // Quiet compiler
  }

  // TODO:  We really don't want to share all of these APIs with arbitrary consumers of the ShenandoahScanRemembered abstraction.
  // But in the spirit of quick and dirty for the time being, I'm going to go ahead and publish everything for right now.  Some
  // existing code already depends on having access to these services (because existing code has not been written to honor
  // full abstraction of remembered set scanning).  In the not too distant future, we want to try to make most, if not all, of
  // these services private.  Two problems with publicizing:
  //  1. Allowing arbitrary users to reach beneath the hood allows the users to make assumptions about underlying implementation.
  //     This will make it more difficult to change underlying implementation at a future time, such as when we eventually experiment
  //     with SATB-based implementation of remembered set representation.
  //  2. If we carefully control sharing of certain of these services, we can reduce the overhead of synchronization by assuring
  //     that all users follow protocols that avoid contention that might require synchronization.  When we publish these APIs, we
  //     lose control over who accesses the data and how.  As a result, we are required to insert more defensive measures into
  //     the implementation, including synchronization locks.


  // Card index is zero-based relative to first spanned card region.
  size_t last_valid_index();
  size_t total_cards();
  size_t card_index_for_addr(HeapWord *p);
  HeapWord *addr_for_card_index(size_t card_index);
  bool is_card_dirty(size_t card_index);
  bool is_write_card_dirty(size_t card_index) { return _rs->is_write_card_dirty(card_index); }
  void mark_card_as_dirty(size_t card_index);
  void mark_range_as_dirty(size_t card_index, size_t num_cards);
  void mark_card_as_clean(size_t card_index);
  void mark_range_as_clean(size_t card_index, size_t num_cards);
  bool is_card_dirty(HeapWord *p);
  void mark_card_as_dirty(HeapWord *p);
  void mark_range_as_dirty(HeapWord *p, size_t num_heap_words);
  void mark_card_as_clean(HeapWord *p);
  void mark_range_as_clean(HeapWord *p, size_t num_heap_words);
  size_t cluster_count();

  // Called by GC thread at start of concurrent mark to exchange roles of read and write remembered sets.
  void swap_remset() { _rs->swap_remset(); }

  void reset_remset(HeapWord* start, size_t word_count) { _rs->reset_remset(start, word_count); }

  void merge_write_table(HeapWord* start, size_t word_count) { _rs->merge_write_table(start, word_count); }

  // Called by GC thread after scanning old remembered set in order to prepare for next GC pass
  void clear_old_remset() { _rs->clear_old_remset(); }

  size_t cluster_for_addr(HeapWord *addr);
  HeapWord* addr_for_cluster(size_t cluster_no);

  void reset_object_range(HeapWord *from, HeapWord *to);
  void register_object(HeapWord *addr);
  void register_object_without_lock(HeapWord *addr);
  void coalesce_objects(HeapWord *addr, size_t length_in_words);

  HeapWord* first_object_in_card(size_t card_index) {
    if (_scc->starts_object(card_index)) {
      return addr_for_card_index(card_index) + _scc->get_first_start(card_index);
    } else {
      return nullptr;
    }
  }

  // Return true iff this object is "properly" registered.
  bool verify_registration(HeapWord* address, ShenandoahMarkingContext* ctx);

  // Clear the cards to clean, and clear the object_starts info to no objects.
  void mark_range_as_empty(HeapWord *addr, size_t length_in_words);

  // process_clusters() scans a portion of the remembered set
  // for references from old gen into young.  Several worker threads
  // scan different portions of the remembered set by making parallel invocations
  // of process_clusters() with each invocation scanning different
  // "clusters" of the remembered set.
  //
  // An invocation of process_clusters() examines all of the
  // intergenerational references spanned by `count` clusters starting
  // with `first_cluster`.  The `oops` argument is a worker-thread-local
  // OopClosure that is applied to all "valid" references in the remembered set.
  //
  // A side effect of executing process_clusters() is to update the remembered
  // set entries (e.g. marking dirty cards clean if they no longer
  // hold references to young-gen memory).
  //
  // An implementation of process_clusters() may choose to efficiently
  // address more typical scenarios in the structure of remembered sets.  E.g.
  // in the generational setting, one might expect remembered sets to be very sparse
  // (low mutation rates in the old generation leading to sparse dirty cards,
  // each with very few intergenerational pointers).  Specific implementations
  // may choose to degrade gracefully as the sparsity assumption fails to hold,
  // such as when there are sudden spikes in (premature) promotion or in the
  // case of an underprovisioned, poorly-tuned, or poorly-shaped heap.
  //
  // At the start of a concurrent young generation marking cycle, we invoke process_clusters
  // with ClosureType ShenandoahInitMarkRootsClosure.
  //
  // At the start of a concurrent evacuation phase, we invoke process_clusters with
  // ClosureType ShenandoahEvacuateUpdateRootsClosure.
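  //
  // In outline, a single invocation behaves like the following sketch
  // (hypothetical pseudocode, not the actual implementation):
  //
  //   for (size_t cl = first_cluster; cl < first_cluster + count; cl++) {
  //     for each card in cluster cl, up to end_of_range:
  //       if (card is dirty in the table selected by use_write_table) {
  //         HeapWord* p = first object overlapping the card (via object_starts);
  //         walk objects from p, applying *oops to each reference field;
  //       }
  //   }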

  // All template expansions require methods to be defined in the inline.hpp file, but larger
  // such methods need not be declared as inline.
  template <typename ClosureType>
  void process_clusters(size_t first_cluster, size_t count, HeapWord *end_of_range, ClosureType *oops,
                        bool use_write_table, uint worker_id);

  template <typename ClosureType>
  inline void process_humongous_clusters(ShenandoahHeapRegion* r, size_t first_cluster, size_t count,
                                         HeapWord *end_of_range, ClosureType *oops, bool use_write_table);

  template <typename ClosureType>
  inline void process_region_slice(ShenandoahHeapRegion* region, size_t offset, size_t clusters, HeapWord* end_of_range,
                                   ClosureType *cl, bool use_write_table, uint worker_id);

  // To Do:
  //  Create subclasses of ShenandoahInitMarkRootsClosure and
  //  ShenandoahEvacuateUpdateRootsClosure and any other closures
  //  that need to participate in remembered set scanning.  Within the
  //  subclasses, add a (probably templated) instance variable that
  //  refers to the associated ShenandoahCardCluster object.  Use this
  //  ShenandoahCardCluster instance to "enhance" the do_oops
  //  processing so that we can:
  //
  //   1. Avoid processing references that correspond to clean card
  //      regions, and
  //   2. Set card status to CLEAN when the associated card region no
  //      longer holds inter-generational references.
  //
  //  To enable efficient implementation of these behaviors, we
  //  probably also want to add a few fields into the
  //  ShenandoahCardCluster object that allow us to precompute and
  //  remember the addresses at which card status is going to change
  //  from dirty to clean and clean to dirty.  The do_oops
  //  implementations will want to update this value each time they
  //  cross one of these boundaries.
  void roots_do(OopIterateClosure* cl);

  // Log stats related to card/RS stats for given phase t
  void log_card_stats(uint nworkers, CardStatLogType t) PRODUCT_RETURN;
private:
  // Log stats for given worker id related into given summary card/RS stats
  void log_worker_card_stats(uint worker_id, HdrSeq* sum_stats) PRODUCT_RETURN;

  // Log given stats
  inline void log_card_stats(HdrSeq* stats) PRODUCT_RETURN;

  // Merge the stats from worker_id into the given summary stats, and clear the worker_id's stats.
  void merge_worker_card_stats_cumulative(HdrSeq* worker_stats, HdrSeq* sum_stats) PRODUCT_RETURN;
};


// A ShenandoahRegionChunk represents a contiguous interval of a ShenandoahHeapRegion, typically representing
// work to be done by a worker thread.
struct ShenandoahRegionChunk {
  ShenandoahHeapRegion *_r;      // The region of which this represents a chunk
  size_t _chunk_offset;          // HeapWordSize offset
  size_t _chunk_size;            // HeapWordSize qty
};

// ShenandoahRegionChunkIterator divides the total remembered set scanning effort into ShenandoahRegionChunks
// that are assigned one at a time to worker threads.  (Here, we use the terms `assignments` and `chunks`
// interchangeably.)  Note that the effort required to scan a range of memory is not necessarily a linear
// function of the size of the range.  Some memory ranges hold only a small number of live objects.
// Some ranges hold primarily primitive (non-pointer) data.  We start with larger chunk sizes because larger chunks
// reduce coordination overhead.  We expect that the GC worker threads that receive more difficult assignments
// will work longer on those chunks.  Meanwhile, other worker threads will repeatedly accept and complete multiple
// easier chunks.  As the total amount of work remaining to be completed decreases, we decrease the size of chunks
// given to individual threads.  This reduces the likelihood of significant imbalance between worker thread assignments
// when there is less meaningful work to be performed by the remaining worker threads while they wait for
// worker threads with difficult assignments to finish, reducing the overall duration of the phase.
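//
// A hedged sketch of how such an iterator can hand out chunks in a
// multi-thread-safe way (illustrative only; the actual implementation lives
// in the corresponding .cpp/.inline.hpp files): each caller claims a unique
// global chunk index with an atomic increment, then maps that index onto a
// (region, offset, size) triple using the group tables declared below.
//
//   bool next(ShenandoahRegionChunk* assignment) {
//     size_t index = Atomic::fetch_then_add(&_index, (size_t) 1);
//     if (index >= _total_chunks) {
//       return false;                  // no work remains
//     }
//     // find the group g with _group_entries[g-1] <= index < _group_entries[g],
//     // then fill in assignment->_r, _chunk_offset, and _chunk_size from
//     // _region_index[g], _group_offset[g], and _group_chunk_size[g].
//     return true;
//   }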

class ShenandoahRegionChunkIterator : public StackObj {
private:
  // The largest chunk size is 4 MiB, measured in words.  Otherwise, remembered set scanning may become too unbalanced.
  // If the largest chunk size is too small, there is too much overhead sifting out assignments to individual worker threads.
  static const size_t _maximum_chunk_size_words = (4 * 1024 * 1024) / HeapWordSize;

  static const size_t _clusters_in_smallest_chunk = 4;

  // smallest_chunk_size is 4 clusters.  Each cluster spans 32 KiB, so the smallest
  // chunk spans 128 KiB.  This is computed from CardTable::card_size_in_words() *
  //      ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
  static size_t smallest_chunk_size_words() {
      return _clusters_in_smallest_chunk * CardTable::card_size_in_words() *
             ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
  }

  // The total remembered set scanning effort is divided into chunks of work that are assigned to individual worker tasks.
  // The chunks of assigned work are divided into groups, where the size of the typical group (_regular_group_size) is half the
  // total number of regions.  The first group may be larger than
  // _regular_group_size in the case that the first group's chunk
  // size is less than the region size.  The last group may be larger
  // than _regular_group_size because no group is allowed to
  // have smaller assignments than _smallest_chunk_size, which is 128 KB.

  // Under normal circumstances, no configuration needs more than _maximum_groups (currently 6).
  // The first group processes chunks of size 4 MiB (or smaller for smaller region sizes), and each
  // subsequent group halves the chunk size, down to 128 KiB in the last group:

  // group[0] is 4 MiB chunk size (_maximum_chunk_size_words)
  // group[1] is 2 MiB chunk size
  // group[2] is 1 MiB chunk size
  // group[3] is 512 KiB chunk size
  // group[4] is 256 KiB chunk size
  // group[5] is 128 KiB chunk size (_clusters_in_smallest_chunk * 64 cards * 64 words per card = 16384 words)
  static const size_t _maximum_groups = 6;

  const ShenandoahHeap* _heap;

  const size_t _regular_group_size;                        // Number of chunks in each group
  const size_t _first_group_chunk_size_b4_rebalance;
  const size_t _num_groups;                        // Number of groups in this configuration
  const size_t _total_chunks;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

  size_t _region_index[_maximum_groups];           // The region index for the first region spanned by this group
  size_t _group_offset[_maximum_groups];           // The offset at which this group begins within the first region it spans
  size_t _group_chunk_size[_maximum_groups];       // The size of each chunk within this group
  size_t _group_entries[_maximum_groups];          // Total chunks spanned by this group and the ones before it.

  // No implicit copying: iterators should be passed by reference to capture the state
  NONCOPYABLE(ShenandoahRegionChunkIterator);

  // Makes use of _heap.
  size_t calc_regular_group_size();

  // Makes use of _regular_group_size, which must be initialized before call.
  size_t calc_first_group_chunk_size_b4_rebalance();

  // Makes use of _regular_group_size and _first_group_chunk_size_b4_rebalance, both of which must be initialized before call.
  size_t calc_num_groups();

  // Makes use of _regular_group_size and _first_group_chunk_size_b4_rebalance, both of which must be initialized before call.
  size_t calc_total_chunks();

public:
  ShenandoahRegionChunkIterator(size_t worker_count);
  ShenandoahRegionChunkIterator(ShenandoahHeap* heap, size_t worker_count);

  // Reset iterator to default state
  void reset();

  // Fills in assignment with the next chunk of work and returns true iff there is more work.
  // Otherwise, returns false.  This is multi-thread-safe.
  inline bool next(struct ShenandoahRegionChunk *assignment);

  // This is *not* MT safe.  However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  inline bool has_next() const;
};
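
// Typical use by a scanning task (sketch; mirrors how ShenandoahScanRememberedTask
// below consumes its _work_list):
//
//   ShenandoahRegionChunk assignment;
//   while (_work_list->next(&assignment)) {
//     // scan assignment._r starting at word offset assignment._chunk_offset,
//     // for assignment._chunk_size words
//   }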

typedef ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet> RememberedScanner;

class ShenandoahScanRememberedTask : public WorkerTask {
 private:
  ShenandoahObjToScanQueueSet* _queue_set;
  ShenandoahObjToScanQueueSet* _old_queue_set;
  ShenandoahReferenceProcessor* _rp;
  ShenandoahRegionChunkIterator* _work_list;
  bool _is_concurrent;

 public:
  ShenandoahScanRememberedTask(ShenandoahObjToScanQueueSet* queue_set,
                               ShenandoahObjToScanQueueSet* old_queue_set,
                               ShenandoahReferenceProcessor* rp,
                               ShenandoahRegionChunkIterator* work_list,
                               bool is_concurrent);

  void work(uint worker_id);
  void do_work(uint worker_id);
};

// After Full GC is done, reconstruct the remembered set by iterating over OLD regions,
// registering all objects between bottom() and top(), and dirtying the cards containing
// cross-generational pointers.
class ShenandoahReconstructRememberedSetTask : public WorkerTask {
private:
  ShenandoahRegionIterator* _regions;

public:
  explicit ShenandoahReconstructRememberedSetTask(ShenandoahRegionIterator* regions);

  void work(uint worker_id) override;
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBERED_HPP