src/hotspot/share/gc/g1/g1CollectedHeap.hpp

 126  private:
 127   void reset_from_card_cache(uint start_idx, size_t num_regions);
 128  public:
 129   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 130 };
 131 
 132 class G1CollectedHeap : public CollectedHeap {
 133   friend class G1FreeCollectionSetTask;
 134   friend class VM_CollectForMetadataAllocation;
 135   friend class VM_G1CollectForAllocation;
 136   friend class VM_G1CollectFull;
 137   friend class VMStructs;
 138   friend class MutatorAllocRegion;
 139   friend class G1FullCollector;
 140   friend class G1GCAllocRegion;
 141   friend class G1HeapVerifier;
 142 
 143   // Closures used in implementation.
 144   friend class G1ParScanThreadState;
 145   friend class G1ParScanThreadStateSet;
 146   friend class G1EvacuateRegionsTask;
 147   friend class G1PLABAllocator;

 148 
 149   // Other related classes.
 150   friend class HeapRegionClaimer;
 151 
 152   // Testing classes.
 153   friend class G1CheckCSetFastTableClosure;
 154 
 155 private:
 156   G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
 157 
 158   WorkGang* _workers;
 159   G1CollectorPolicy* _collector_policy;
 160   G1CardTable* _card_table;
 161 
 162   SoftRefPolicy      _soft_ref_policy;
 163 
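      // Note: an allocation is treated as humongous once it is at least half a region in
      // size, so this threshold is expected to be HeapRegion::GrainWords / 2.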
 164   static size_t _humongous_object_threshold_in_words;
 165 
 166   // These sets keep track of old, archive and humongous regions respectively.
 167   HeapRegionSet _old_set;


 188   // humongous set which was not torn down in the first place. If
 189   // free_list_only is true, it will only rebuild the master free
 190   // list. It is called after a Full GC (free_list_only == false) or
 191   // after heap shrinking (free_list_only == true).
 192   void rebuild_region_sets(bool free_list_only);
 193 
 194   // Callback for region mapping changed events.
 195   G1RegionMappingChangedListener _listener;
 196 
 197   // The sequence of all heap regions in the heap.
 198   HeapRegionManager* _hrm;
 199 
 200   // Manages all allocations within regions except humongous object allocations.
 201   G1Allocator* _allocator;
 202 
 203   // Manages all heap verification.
 204   G1HeapVerifier* _verifier;
 205 
 206   // Outside of GC pauses, the number of bytes used in all regions other
 207   // than the current allocation region(s).
 208   volatile size_t _summary_bytes_used;
 209 
 210   void increase_used(size_t bytes);
 211   void decrease_used(size_t bytes);
 212 
 213   void set_used(size_t bytes);
 214 
 215   // Class that handles archive allocation ranges.
 216   G1ArchiveAllocator* _archive_allocator;
 217 
 218   // GC allocation statistics policy for survivors.
 219   G1EvacStats _survivor_evac_stats;
 220 
 221   // GC allocation statistics policy for tenured objects.
 222   G1EvacStats _old_evac_stats;
 223 
 224   // It specifies whether we should attempt to expand the heap after a
 225   // region allocation failure. If heap expansion fails we set this to
 226   // false so that we don't re-attempt the heap expansion (it's likely
 227   // that subsequent expansion attempts will also fail if one fails).
 228   // Currently, it is only consulted during GC and it's reset at the


 501   // successful, perform the allocation and return the address of the
 502   // allocated block, or else "NULL".
 503   HeapWord* expand_and_allocate(size_t word_size);
 504 
 505   // Process any reference objects discovered.
 506   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 507 
 508   // During an initial mark pause we may install a pending list head which is not
 509   // otherwise reachable; ensure that it is marked in the bitmap for concurrent marking
 510   // to discover.
 511   void make_pending_list_reachable();
 512 
 513   // Merges the information gathered on a per-thread basis for all worker threads
 514   // during GC into global variables.
 515   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 516 public:
 517   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 518 
 519   WorkGang* workers() const { return _workers; }
 520 
 521   // Runs the given AbstractGangTask with the current active workers, returning the
 522   // total time taken.
 523   Tickspan run_task(AbstractGangTask* task);
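A minimal caller sketch for run_task(), assuming a hypothetical AbstractGangTask subclass; only run_task() itself and the work() hook come from the declarations above:

    class SampleProcessingTask : public AbstractGangTask {
    public:
      SampleProcessingTask() : AbstractGangTask("Sample Processing Task") { }
      virtual void work(uint worker_id) {
        // per-worker slice of the processing goes here
      }
    };

    SampleProcessingTask task;
    // Blocks until all currently active workers have run work(), and reports the wall time.
    Tickspan elapsed = G1CollectedHeap::heap()->run_task(&task);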
 524 
 525   G1Allocator* allocator() {
 526     return _allocator;
 527   }
 528 
 529   G1HeapVerifier* verifier() {
 530     return _verifier;
 531   }
 532 
 533   G1MonitoringSupport* g1mm() {
 534     assert(_g1mm != NULL, "should have been initialized");
 535     return _g1mm;
 536   }
 537 
 538   void resize_heap_if_necessary();
 539 
 540   // Expand the garbage-first heap by at least the given size (in bytes!).
 541   // Returns true if the heap was expanded by the requested amount;
 542   // false otherwise.
 543   // (Rounds up to a HeapRegion boundary.)
 544   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
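A small usage sketch for expand(); the request is rounded up to whole regions as the comment says, and the logging is illustrative only:

    size_t expand_bytes = 3 * M;        // with 2M regions this is rounded up to 4M (two regions)
    double expand_time_ms = 0.0;
    if (!expand(expand_bytes, _workers, &expand_time_ms)) {
      // Expansion can fail, e.g. when there are no uncommitted regions left.
      log_debug(gc, ergo, heap)("Did not expand the heap (requested " SIZE_FORMAT "B)", expand_bytes);
    }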


 724   // methods that call do_collection_pause() release the Heap_lock
 725   // before the call, so it's easy to read gc_count_before just before.
 726   HeapWord* do_collection_pause(size_t         word_size,
 727                                 uint           gc_count_before,
 728                                 bool*          succeeded,
 729                                 GCCause::Cause gc_cause);
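A sketch of the caller protocol described above: gc_count_before is read while still holding the Heap_lock, the lock is released, and the pause is requested with that count so an intervening collection can be detected:

    uint gc_count_before;
    {
      MutexLocker ml(Heap_lock);
      gc_count_before = total_collections();   // Heap_lock released before scheduling the pause
    }
    bool succeeded;
    HeapWord* result = do_collection_pause(word_size, gc_count_before,
                                           &succeeded, GCCause::_g1_inc_collection_pause);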
 730 
 731   void wait_for_root_region_scanning();
 732 
 733   // The guts of the incremental collection pause, executed by the VM
 734   // thread. It returns false if it is unable to do the collection due
 735   // to the GC locker being active, true otherwise.
 736   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 737 
 738   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 739   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 740   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 741 
 742   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 743 
 744   // Actually do the work of evacuating the parts of the collection set.
 745   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
 746   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 747 private:
 748   // Evacuate the next set of optional regions.
 749   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 750 
 751 public:
 752   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info);
 753   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 754 
 755   void expand_heap_after_young_collection();
 756   // Update object copying statistics.
 757   void record_obj_copy_mem_stats();
 758 
 759   // The hot card cache for remembered set insertion optimization.
 760   G1HotCardCache* _hot_card_cache;
 761 
 762   // The g1 remembered set of the heap.
 763   G1RemSet* _rem_set;
 764 
 765   // A set of cards that cover the objects for which the Rsets should be updated
 766   // concurrently after the collection.
 767   G1DirtyCardQueueSet _dirty_card_queue_set;
 768 
 769   // After a collection pause, convert the regions in the collection set into free
 770   // regions.
 771   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);


1154 
1155   // Calculate the region index of the given address. Given address must be
1156   // within the heap.
1157   inline uint addr_to_region(HeapWord* addr) const;
1158 
1159   inline HeapWord* bottom_addr_for_region(uint index) const;
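A sketch of the arithmetic these two helpers imply, illustrative rather than the exact implementation; pointer_delta() and the HeapRegion size constants are existing HotSpot names:

    uint index = (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t))
                        >> HeapRegion::LogOfHRGrainBytes);                // byte offset -> region index
    HeapWord* bottom = reserved_region().start() + (size_t)index * HeapRegion::GrainWords;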
1160 
1161   // Two functions to iterate over the heap regions in parallel. Threads
1162   // compete using the HeapRegionClaimer to claim the regions before
1163   // applying the closure on them.
1164   // The _from_worker_offset version uses the HeapRegionClaimer and
1165   // the worker id to calculate a start offset to prevent all workers from
1166   // starting at the same point.
1167   void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
1168                                                   HeapRegionClaimer* hrclaimer,
1169                                                   uint worker_id) const;
1170 
1171   void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
1172                                           HeapRegionClaimer* hrclaimer) const;
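A sketch of driving the parallel iteration from a worker thread; SampleRegionClosure and the _hr_claimer field are hypothetical, while the closure hook and the entry point are the ones declared here:

    class SampleRegionClosure : public HeapRegionClosure {
    public:
      virtual bool do_heap_region(HeapRegion* r) {
        // process r; returning true terminates the iteration early
        return false;
      }
    };

    // Inside AbstractGangTask::work(uint worker_id), with _hr_claimer a HeapRegionClaimer
    // constructed once for the whole gang:
    SampleRegionClosure cl;
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);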
1173 
1174   // Iterate over all regions in the current collection set.
1175   void collection_set_iterate_all(HeapRegionClosure* blk);
1176 
1177   // Iterate over the regions in the current increment of the collection set.
1178   // The iteration is started so that the starting regions of the individual worker
1179   // ids (over the set of active_workers) are evenly spread across the collection set
1180   // regions to be iterated.
1181   void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id);
1182 
1183   // Returns the HeapRegion that contains addr. addr must not be NULL.
1184   template <class T>
1185   inline HeapRegion* heap_region_containing(const T addr) const;
1186 
1187   // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted
1188   // region. addr must not be NULL.
1189   template <class T>
1190   inline HeapRegion* heap_region_containing_or_null(const T addr) const;
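A short usage sketch of the two lookups; addr stands for any non-NULL address (or oop) inside the reserved heap:

    HeapRegion* r = heap_region_containing(addr);              // addr must be in a committed region
    HeapRegion* maybe = heap_region_containing_or_null(addr);  // NULL for an uncommitted region
    if (maybe != NULL && maybe->is_humongous()) {
      // ...
    }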
1191 
1192   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1193   // each address in the (reserved) heap is a member of exactly
1194   // one block.  The defining characteristic of a block is that it is
1195   // possible to find its size, and thus to progress forward to the next
1196   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1197   // represent Java objects, or they might be free blocks in a
1198   // free-list-based heap (or subheap), as long as the two kinds are
1199   // distinguishable and the size of each is determinable.
1200 
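A sketch of walking the blocks of one region, assuming block_size() and block_is_obj() counterparts to the block_start() declared below (their declarations sit in the elided lines):

    HeapWord* cur = r->bottom();
    while (cur < r->top()) {
      if (block_is_obj(cur)) {
        oop obj = oop(cur);
        // ... examine the live object ...
      }
      cur += block_size(cur);   // blocks differ in size; each step lands exactly on the next block
    }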
1201   // Returns the address of the start of the "block" that contains the


1241   // Return the maximum heap capacity.
1242   virtual size_t max_capacity() const;
1243 
1244   // Return the size of reserved memory. Returns a different value than max_capacity() when AllocateOldGenAt is used.
1245   virtual size_t max_reserved_capacity() const;
1246 
1247   virtual jlong millis_since_last_gc();
1248 
1249 
1250   // Convenience function to be used in situations where the heap type can be
1251   // asserted to be this type.
1252   static G1CollectedHeap* heap();
1253 
1254   void set_region_short_lived_locked(HeapRegion* hr);
1255   // add appropriate methods for any other surv rate groups
1256 
1257   const G1SurvivorRegions* survivor() const { return &_survivor; }
1258 
1259   uint eden_regions_count() const { return _eden.length(); }
1260   uint survivor_regions_count() const { return _survivor.length(); }
1261   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1262   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1263   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1264   uint old_regions_count() const { return _old_set.length(); }
1265   uint archive_regions_count() const { return _archive_set.length(); }
1266   uint humongous_regions_count() const { return _humongous_set.length(); }
1267 
1268 #ifdef ASSERT
1269   bool check_young_list_empty();
1270 #endif
1271 
1272   // *** Stuff related to concurrent marking.  It's not clear to me that so
1273   // many of these need to be public.
1274 
1275   // The functions below are helper functions that a subclass of
1276   // "CollectedHeap" can use in the implementation of its virtual
1277   // functions.
1278   // This performs a concurrent marking of the live objects in a
1279   // bitmap off to the side.
1280   void do_concurrent_mark();
1281 
1282   bool is_marked_next(oop obj) const;


1411   virtual void print_gc_threads_on(outputStream* st) const;
1412   virtual void gc_threads_do(ThreadClosure* tc) const;
1413 
1414   // Override
1415   void print_tracing_info() const;
1416 
1417   // The following two methods are helpful for debugging RSet issues.
1418   void print_cset_rsets() PRODUCT_RETURN;
1419   void print_all_rsets() PRODUCT_RETURN;
1420 
1421   size_t pending_card_num();
1422 };
1423 
1424 class G1ParEvacuateFollowersClosure : public VoidClosure {
1425 private:
1426   double _start_term;
1427   double _term_time;
1428   size_t _term_attempts;
1429 
1430   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1431   void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
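A sketch of how these two timing hooks are meant to bracket a termination attempt; offer_termination() on ParallelTaskTerminator is the existing entry point:

    bool offer_termination() {
      start_term_time();                                // counts the attempt and stamps its start
      bool res = terminator()->offer_termination();
      end_term_time();                                  // accumulates the time spent trying to terminate
      return res;
    }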
1432 protected:
1433   G1CollectedHeap*              _g1h;
1434   G1ParScanThreadState*         _par_scan_state;
1435   RefToScanQueueSet*            _queues;
1436   ParallelTaskTerminator*       _terminator;
1437   G1GCPhaseTimes::GCParPhases   _phase;
1438 
1439   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1440   RefToScanQueueSet*      queues()         { return _queues; }
1441   ParallelTaskTerminator* terminator()     { return _terminator; }
1442 
1443 public:
1444   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1445                                 G1ParScanThreadState* par_scan_state,
1446                                 RefToScanQueueSet* queues,
1447                                 ParallelTaskTerminator* terminator,
1448                                 G1GCPhaseTimes::GCParPhases phase)
1449     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1450       _g1h(g1h), _par_scan_state(par_scan_state),
1451       _queues(queues), _terminator(terminator), _phase(phase) {}


 126  private:
 127   void reset_from_card_cache(uint start_idx, size_t num_regions);
 128  public:
 129   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 130 };
 131 
 132 class G1CollectedHeap : public CollectedHeap {
 133   friend class G1FreeCollectionSetTask;
 134   friend class VM_CollectForMetadataAllocation;
 135   friend class VM_G1CollectForAllocation;
 136   friend class VM_G1CollectFull;
 137   friend class VMStructs;
 138   friend class MutatorAllocRegion;
 139   friend class G1FullCollector;
 140   friend class G1GCAllocRegion;
 141   friend class G1HeapVerifier;
 142 
 143   // Closures used in implementation.
 144   friend class G1ParScanThreadState;
 145   friend class G1ParScanThreadStateSet;
 146   friend class G1ParTask;
 147   friend class G1PLABAllocator;
 148   friend class G1PrepareCompactClosure;
 149 
 150   // Other related classes.
 151   friend class HeapRegionClaimer;
 152 
 153   // Testing classes.
 154   friend class G1CheckCSetFastTableClosure;
 155 
 156 private:
 157   G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
 158 
 159   WorkGang* _workers;
 160   G1CollectorPolicy* _collector_policy;
 161   G1CardTable* _card_table;
 162 
 163   SoftRefPolicy      _soft_ref_policy;
 164 
 165   static size_t _humongous_object_threshold_in_words;
 166 
 167   // These sets keep track of old, archive and humongous regions respectively.
 168   HeapRegionSet _old_set;


 189   // humongous set which was not torn down in the first place. If
 190   // free_list_only is true, it will only rebuild the master free
 191   // list. It is called after a Full GC (free_list_only == false) or
 192   // after heap shrinking (free_list_only == true).
 193   void rebuild_region_sets(bool free_list_only);
 194 
 195   // Callback for region mapping changed events.
 196   G1RegionMappingChangedListener _listener;
 197 
 198   // The sequence of all heap regions in the heap.
 199   HeapRegionManager* _hrm;
 200 
 201   // Manages all allocations within regions except humongous object allocations.
 202   G1Allocator* _allocator;
 203 
 204   // Manages all heap verification.
 205   G1HeapVerifier* _verifier;
 206 
 207   // Outside of GC pauses, the number of bytes used in all regions other
 208   // than the current allocation region(s).
 209   size_t _summary_bytes_used;
 210 
 211   void increase_used(size_t bytes);
 212   void decrease_used(size_t bytes);
 213 
 214   void set_used(size_t bytes);
 215 
 216   // Class that handles archive allocation ranges.
 217   G1ArchiveAllocator* _archive_allocator;
 218 
 219   // GC allocation statistics policy for survivors.
 220   G1EvacStats _survivor_evac_stats;
 221 
 222   // GC allocation statistics policy for tenured objects.
 223   G1EvacStats _old_evac_stats;
 224 
 225   // It specifies whether we should attempt to expand the heap after a
 226   // region allocation failure. If heap expansion fails we set this to
 227   // false so that we don't re-attempt the heap expansion (it's likely
 228   // that subsequent expansion attempts will also fail if one fails).
 229   // Currently, it is only consulted during GC and it's reset at the


 502   // successful, perform the allocation and return the address of the
 503   // allocated block, or else "NULL".
 504   HeapWord* expand_and_allocate(size_t word_size);
 505 
 506   // Process any reference objects discovered.
 507   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 508 
 509   // During an initial mark pause we may install a pending list head which is not
 510   // otherwise reachable; ensure that it is marked in the bitmap for concurrent marking
 511   // to discover.
 512   void make_pending_list_reachable();
 513 
 514   // Merges the information gathered on a per-thread basis for all worker threads
 515   // during GC into global variables.
 516   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 517 public:
 518   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 519 
 520   WorkGang* workers() const { return _workers; }
 521 




 522   G1Allocator* allocator() {
 523     return _allocator;
 524   }
 525 
 526   G1HeapVerifier* verifier() {
 527     return _verifier;
 528   }
 529 
 530   G1MonitoringSupport* g1mm() {
 531     assert(_g1mm != NULL, "should have been initialized");
 532     return _g1mm;
 533   }
 534 
 535   void resize_heap_if_necessary();
 536 
 537   // Expand the garbage-first heap by at least the given size (in bytes!).
 538   // Returns true if the heap was expanded by the requested amount;
 539   // false otherwise.
 540   // (Rounds up to a HeapRegion boundary.)
 541   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);


 721   // methods that call do_collection_pause() release the Heap_lock
 722   // before the call, so it's easy to read gc_count_before just before.
 723   HeapWord* do_collection_pause(size_t         word_size,
 724                                 uint           gc_count_before,
 725                                 bool*          succeeded,
 726                                 GCCause::Cause gc_cause);
 727 
 728   void wait_for_root_region_scanning();
 729 
 730   // The guts of the incremental collection pause, executed by the VM
 731   // thread. It returns false if it is unable to do the collection due
 732   // to the GC locker being active, true otherwise.
 733   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 734 
 735   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 736   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 737   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 738 
 739   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 740 
 741   // Actually do the work of evacuating the collection set.
 742   void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
 743   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 744   void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);


 745 

 746   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info);
 747   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 748 
 749   void expand_heap_after_young_collection();
 750   // Update object copying statistics.
 751   void record_obj_copy_mem_stats();
 752 
 753   // The hot card cache for remembered set insertion optimization.
 754   G1HotCardCache* _hot_card_cache;
 755 
 756   // The g1 remembered set of the heap.
 757   G1RemSet* _rem_set;
 758 
 759   // A set of cards that cover the objects for which the Rsets should be updated
 760   // concurrently after the collection.
 761   G1DirtyCardQueueSet _dirty_card_queue_set;
 762 
 763   // After a collection pause, convert the regions in the collection set into free
 764   // regions.
 765   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);


1148 
1149   // Calculate the region index of the given address. Given address must be
1150   // within the heap.
1151   inline uint addr_to_region(HeapWord* addr) const;
1152 
1153   inline HeapWord* bottom_addr_for_region(uint index) const;
1154 
1155   // Two functions to iterate over the heap regions in parallel. Threads
1156   // compete using the HeapRegionClaimer to claim the regions before
1157   // applying the closure on them.
1158   // The _from_worker_offset version uses the HeapRegionClaimer and
1159   // the worker id to calculate a start offset to prevent all workers from
1160   // starting at the same point.
1161   void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
1162                                                   HeapRegionClaimer* hrclaimer,
1163                                                   uint worker_id) const;
1164 
1165   void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
1166                                           HeapRegionClaimer* hrclaimer) const;
1167 
1168   // Iterate over the regions (if any) in the current collection set.
1169   void collection_set_iterate(HeapRegionClosure* blk);
1170 
1171   // Iterate over the regions (if any) in the current collection set. The iteration
1172   // over the entire collection set is started so that the starting regions of the
1173   // individual worker ids (over the set of active_workers) are evenly spread across
1174   // the collection set regions.
1175   void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
1176 
1177   // Returns the HeapRegion that contains addr. addr must not be NULL.
1178   template <class T>
1179   inline HeapRegion* heap_region_containing(const T addr) const;
1180 
1181   // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted
1182   // region. addr must not be NULL.
1183   template <class T>
1184   inline HeapRegion* heap_region_containing_or_null(const T addr) const;
1185 
1186   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1187   // each address in the (reserved) heap is a member of exactly
1188   // one block.  The defining characteristic of a block is that it is
1189   // possible to find its size, and thus to progress forward to the next
1190   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1191   // represent Java objects, or they might be free blocks in a
1192   // free-list-based heap (or subheap), as long as the two kinds are
1193   // distinguishable and the size of each is determinable.
1194 
1195   // Returns the address of the start of the "block" that contains the


1235   // Return the maximum heap capacity.
1236   virtual size_t max_capacity() const;
1237 
1238   // Return the size of reserved memory. Returns a different value than max_capacity() when AllocateOldGenAt is used.
1239   virtual size_t max_reserved_capacity() const;
1240 
1241   virtual jlong millis_since_last_gc();
1242 
1243 
1244   // Convenience function to be used in situations where the heap type can be
1245   // asserted to be this type.
1246   static G1CollectedHeap* heap();
1247 
1248   void set_region_short_lived_locked(HeapRegion* hr);
1249   // add appropriate methods for any other surv rate groups
1250 
1251   const G1SurvivorRegions* survivor() const { return &_survivor; }
1252 
1253   uint eden_regions_count() const { return _eden.length(); }
1254   uint survivor_regions_count() const { return _survivor.length(); }


1255   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1256   uint old_regions_count() const { return _old_set.length(); }
1257   uint archive_regions_count() const { return _archive_set.length(); }
1258   uint humongous_regions_count() const { return _humongous_set.length(); }
1259 
1260 #ifdef ASSERT
1261   bool check_young_list_empty();
1262 #endif
1263 
1264   // *** Stuff related to concurrent marking.  It's not clear to me that so
1265   // many of these need to be public.
1266 
1267   // The functions below are helper functions that a subclass of
1268   // "CollectedHeap" can use in the implementation of its virtual
1269   // functions.
1270   // This performs a concurrent marking of the live objects in a
1271   // bitmap off to the side.
1272   void do_concurrent_mark();
1273 
1274   bool is_marked_next(oop obj) const;


1403   virtual void print_gc_threads_on(outputStream* st) const;
1404   virtual void gc_threads_do(ThreadClosure* tc) const;
1405 
1406   // Override
1407   void print_tracing_info() const;
1408 
1409   // The following two methods are helpful for debugging RSet issues.
1410   void print_cset_rsets() PRODUCT_RETURN;
1411   void print_all_rsets() PRODUCT_RETURN;
1412 
1413   size_t pending_card_num();
1414 };
1415 
1416 class G1ParEvacuateFollowersClosure : public VoidClosure {
1417 private:
1418   double _start_term;
1419   double _term_time;
1420   size_t _term_attempts;
1421 
1422   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1423   void end_term_time() { _term_time += os::elapsedTime() - _start_term; }
1424 protected:
1425   G1CollectedHeap*              _g1h;
1426   G1ParScanThreadState*         _par_scan_state;
1427   RefToScanQueueSet*            _queues;
1428   ParallelTaskTerminator*       _terminator;
1429   G1GCPhaseTimes::GCParPhases   _phase;
1430 
1431   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1432   RefToScanQueueSet*      queues()         { return _queues; }
1433   ParallelTaskTerminator* terminator()     { return _terminator; }
1434 
1435 public:
1436   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1437                                 G1ParScanThreadState* par_scan_state,
1438                                 RefToScanQueueSet* queues,
1439                                 ParallelTaskTerminator* terminator,
1440                                 G1GCPhaseTimes::GCParPhases phase)
1441     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1442       _g1h(g1h), _par_scan_state(par_scan_state),
1443       _queues(queues), _terminator(terminator), _phase(phase) {}