< prev index next >

src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp

Print this page




 524   friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
 525   friend class VM_CMS_Operation;
 526   friend class VM_CMS_Initial_Mark;
 527   friend class VM_CMS_Final_Remark;
 528   friend class TraceCMSMemoryManagerStats;
       // NOTE(review): webrev print listing — each line keeps its original
       // header line number.  These friend/field declarations are the interior
       // of a class whose opening brace is outside this excerpt (presumably
       // CMSCollector in concurrentMarkSweepGeneration.hpp — confirm against
       // the full header before editing).
 529 
 530  private:
       // Timestamp of the last GC; the inline setter below is the only
       // mutator visible in this excerpt.
 531   jlong _time_of_last_gc;
 532   void update_time_of_last_gc(jlong now) {
 533     _time_of_last_gc = now;
 534   }
 535 
       // Presumably the work-stealing queues used by parallel GC threads —
       // confirm against the .cpp.
 536   OopTaskQueueSet* _task_queues;
 537 
 538   // Overflow list of grey objects, threaded through mark-word
 539   // Manipulated with CAS in the parallel/multi-threaded case.
       // NOTE(review): declared as raw oopDesc* volatile rather than oop,
       // presumably so the CAS mentioned above operates on a plain pointer —
       // do not "clean up" to oop without checking every atomic user.
 540   oopDesc* volatile _overflow_list;
 541   // The following array-pair keeps track of mark words
 542   // displaced for accommodating overflow list above.
 543   // This code will likely be revisited under RFE#4922830.
 544   Stack<oop, mtGC>      _preserved_oop_stack;
 545   Stack<markWord, mtGC> _preserved_mark_stack;
 546 
 547   // In support of multi-threaded concurrent phases
 548   YieldingFlexibleWorkGang* _conc_workers;
 549 
 550   // Performance Counters
 551   CollectorCounters* _gc_counters;
 552   CollectorCounters* _cgc_counters;
 553 
 554   // Initialization Errors
 555   bool _completed_initialization;
 556 
 557   // In support of ExplicitGCInvokesConcurrent
       // static members: a single pending-request flag/cause shared VM-wide,
       // not per-collector-instance state.
 558   static bool _full_gc_requested;
 559   static GCCause::Cause _full_gc_cause;
 560   unsigned int _collection_count_start;
 561 
 562   // Should we unload classes this concurrent cycle?
 563   bool _should_unload_classes;
 564   unsigned int  _concurrent_cycles_since_last_unload;
       // (definition continues past the end of this excerpt)
 565   unsigned int concurrent_cycles_since_last_unload() const {


 725 
 726   // Support for parallelizing survivor space rescan
       // NOTE(review): chunk array + cursor presumably let multiple GC threads
       // claim disjoint pieces of survivor space; the exact protocol lives in
       // the .cpp and is not visible here.
 727   HeapWord** _survivor_chunk_array;
 728   size_t     _survivor_chunk_index;
 729   size_t     _survivor_chunk_capacity;
 730   size_t*    _cursor;
 731   ChunkArray* _survivor_plab_array;
 732 
 733   // Support for marking stack overflow handling
 734   bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
 735   bool par_take_from_overflow_list(size_t num,
 736                                    OopTaskQueue* to_work_q,
 737                                    int no_of_gc_threads);
 738   void push_on_overflow_list(oop p);
 739   void par_push_on_overflow_list(oop p);
 740   // The following is, obviously, not, in general, "MT-stable"
 741   bool overflow_list_is_empty() const;
 742 
 743   void preserve_mark_if_necessary(oop p);
 744   void par_preserve_mark_if_necessary(oop p);
 745   void preserve_mark_work(oop p, markWord m);
 746   void restore_preserved_marks_if_any();
       // NOT_PRODUCT(...) is the standard HotSpot macro whose contents are
       // compiled only in non-product (debug) builds; per the comment below,
       // these members inject artificial overflow for testing.
 747   NOT_PRODUCT(bool no_preserved_marks() const;)
 748   // In support of testing overflow code
 749   NOT_PRODUCT(int _overflow_counter;)
 750   NOT_PRODUCT(bool simulate_overflow();)       // Sequential
 751   NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
 752 
 753   // CMS work methods
 754   void checkpointRootsInitialWork(); // Initial checkpoint work
 755 
 756   // A return value of false indicates failure due to stack overflow
 757   bool markFromRootsWork();  // Concurrent marking work
 758 
 759  public:   // FIX ME!!! only for testing
 760   bool do_marking_st();      // Single-threaded marking
 761   bool do_marking_mt();      // Multi-threaded  marking
 762 
 763  private:
 764 
       // (the precleaning declarations continue past the end of this excerpt)
 765   // Concurrent precleaning work

1095     // Note: CMS does MT-discovery during the parallel-remark
1096     // phases. Use ReferenceProcessorMTMutator to make refs
1097     // discovery MT-safe during such phases or other parallel
1098     // discovery phases in the future. This may all go away
1099     // if/when we decide that refs discovery is sufficiently
1100     // rare that the cost of the CAS's involved is in the
1101     // noise. That's a measurement that should be done, and
1102     // the code simplified if that turns out to be the case.
1103     return ConcGCThreads > 1;
1104   }
       // NOTE(review): the brace above closes a member function whose opening
       // lies before this excerpt; judging from the return value it reports
       // whether refs discovery should be multi-threaded — confirm against
       // the full header.
1105 
1106   // Override
1107   virtual void ref_processor_init();
1108 
1109   void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1110 
1111   // Space enquiries
       // NOTE(review): occupancy() divides by capacity(); it would yield
       // inf/NaN if capacity() were 0 — presumably impossible for a committed
       // generation, but worth confirming.
1112   double occupancy() const { return ((double)used())/((double)capacity()); }
1113   size_t contiguous_available() const;
1114   size_t unsafe_max_alloc_nogc() const;
1115   size_t used_stable() const;
1116 
1117   // over-rides
1118   MemRegion used_region_at_save_marks() const;
1119 
1120   // Adjust quantities in the generation affected by
1121   // the compaction.
1122   void reset_after_compaction();
1123 
1124   // Allocation support
1125   HeapWord* allocate(size_t size, bool tlab);
1126   HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1127   oop       promote(oop obj, size_t obj_size);
       // NOTE(review): par_allocate() simply forwards to allocate(); this
       // assumes allocate() performs its own synchronization — TODO confirm
       // in the .cpp before relying on it from multiple threads.
1128   HeapWord* par_allocate(size_t size, bool tlab) {
1129     return allocate(size, tlab);
1130   }
1131 
1132 
1133   // Used by CMSStats to track direct allocation.  The value is sampled and
1134   // reset after each young gen collection.
1135   size_t direct_allocated_words() const { return _direct_allocated_words; }
1136   void reset_direct_allocated_words()   { _direct_allocated_words = 0; }
1137 
1138   // Overrides for parallel promotion.
1139   virtual oop par_promote(int thread_num,
1140                           oop obj, markWord m, size_t word_sz);
1141   virtual void par_promote_alloc_done(int thread_num);
1142   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
1143 
1144   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
1145 
1146   // Inform this (old) generation that a promotion failure was
1147   // encountered during a collection of the young generation.
1148   virtual void promotion_failure_occurred();
1149 
1150   bool should_collect(bool full, size_t size, bool tlab);
1151   virtual bool should_concurrent_collect() const;
1152   virtual bool is_too_full() const;
1153   void collect(bool   full,
1154                bool   clear_all_soft_refs,
1155                size_t size,
1156                bool   tlab);
1157 
1158   HeapWord* expand_and_allocate(size_t word_size,
1159                                 bool tlab,
1160                                 bool parallel = false);




 524   friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
 525   friend class VM_CMS_Operation;
 526   friend class VM_CMS_Initial_Mark;
 527   friend class VM_CMS_Final_Remark;
 528   friend class TraceCMSMemoryManagerStats;
       // NOTE(review): second (old-revision) copy of the 524-565 excerpt shown
       // earlier in this webrev page; it differs only in using the older
       // markOop type where the other copy uses markWord — the pre/post view
       // of the diff.
 529 
 530  private:
 531   jlong _time_of_last_gc;
 532   void update_time_of_last_gc(jlong now) {
 533     _time_of_last_gc = now;
 534   }
 535 
 536   OopTaskQueueSet* _task_queues;
 537 
 538   // Overflow list of grey objects, threaded through mark-word
 539   // Manipulated with CAS in the parallel/multi-threaded case.
       // Raw oopDesc* volatile rather than oop — presumably required by the
       // CAS usage noted above; verify before changing the type.
 540   oopDesc* volatile _overflow_list;
 541   // The following array-pair keeps track of mark words
 542   // displaced for accommodating overflow list above.
 543   // This code will likely be revisited under RFE#4922830.
 544   Stack<oop, mtGC>     _preserved_oop_stack;
 545   Stack<markOop, mtGC> _preserved_mark_stack;
 546 
 547   // In support of multi-threaded concurrent phases
 548   YieldingFlexibleWorkGang* _conc_workers;
 549 
 550   // Performance Counters
 551   CollectorCounters* _gc_counters;
 552   CollectorCounters* _cgc_counters;
 553 
 554   // Initialization Errors
 555   bool _completed_initialization;
 556 
 557   // In support of ExplicitGCInvokesConcurrent
 558   static bool _full_gc_requested;
 559   static GCCause::Cause _full_gc_cause;
 560   unsigned int _collection_count_start;
 561 
 562   // Should we unload classes this concurrent cycle?
 563   bool _should_unload_classes;
 564   unsigned int  _concurrent_cycles_since_last_unload;
       // (definition continues past the end of this excerpt)
 565   unsigned int concurrent_cycles_since_last_unload() const {


 725 
 726   // Support for parallelizing survivor space rescan
       // NOTE(review): old-revision copy of the 725-765 excerpt shown earlier
       // on this webrev page; the only difference is markOop (old type) in
       // preserve_mark_work() where the other copy uses markWord.
 727   HeapWord** _survivor_chunk_array;
 728   size_t     _survivor_chunk_index;
 729   size_t     _survivor_chunk_capacity;
 730   size_t*    _cursor;
 731   ChunkArray* _survivor_plab_array;
 732 
 733   // Support for marking stack overflow handling
 734   bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
 735   bool par_take_from_overflow_list(size_t num,
 736                                    OopTaskQueue* to_work_q,
 737                                    int no_of_gc_threads);
 738   void push_on_overflow_list(oop p);
 739   void par_push_on_overflow_list(oop p);
 740   // The following is, obviously, not, in general, "MT-stable"
 741   bool overflow_list_is_empty() const;
 742 
 743   void preserve_mark_if_necessary(oop p);
 744   void par_preserve_mark_if_necessary(oop p);
 745   void preserve_mark_work(oop p, markOop m);
 746   void restore_preserved_marks_if_any();
       // NOT_PRODUCT(...) — standard HotSpot macro; contents compiled only in
       // non-product (debug) builds, used here to test the overflow code.
 747   NOT_PRODUCT(bool no_preserved_marks() const;)
 748   // In support of testing overflow code
 749   NOT_PRODUCT(int _overflow_counter;)
 750   NOT_PRODUCT(bool simulate_overflow();)       // Sequential
 751   NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
 752 
 753   // CMS work methods
 754   void checkpointRootsInitialWork(); // Initial checkpoint work
 755 
 756   // A return value of false indicates failure due to stack overflow
 757   bool markFromRootsWork();  // Concurrent marking work
 758 
 759  public:   // FIX ME!!! only for testing
 760   bool do_marking_st();      // Single-threaded marking
 761   bool do_marking_mt();      // Multi-threaded  marking
 762 
 763  private:
 764 
       // (the precleaning declarations continue past the end of this excerpt)
 765   // Concurrent precleaning work


1095     // Note: CMS does MT-discovery during the parallel-remark
1096     // phases. Use ReferenceProcessorMTMutator to make refs
1097     // discovery MT-safe during such phases or other parallel
1098     // discovery phases in the future. This may all go away
1099     // if/when we decide that refs discovery is sufficiently
1100     // rare that the cost of the CAS's involved is in the
1101     // noise. That's a measurement that should be done, and
1102     // the code simplified if that turns out to be the case.
1103     return ConcGCThreads > 1;
1104   }
       // NOTE(review): old-revision copy of the 1095+ excerpt shown earlier on
       // this webrev page.  Differences from the newer copy: no used_stable()
       // declaration (hence line numbers shifted by one after 1114), and
       // markOop instead of markWord in par_promote().  The brace above closes
       // a member function whose opening lies before this excerpt.
1105 
1106   // Override
1107   virtual void ref_processor_init();
1108 
1109   void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1110 
1111   // Space enquiries
1112   double occupancy() const { return ((double)used())/((double)capacity()); }
1113   size_t contiguous_available() const;
1114   size_t unsafe_max_alloc_nogc() const;

1115 
1116   // over-rides
1117   MemRegion used_region_at_save_marks() const;
1118 
1119   // Adjust quantities in the generation affected by
1120   // the compaction.
1121   void reset_after_compaction();
1122 
1123   // Allocation support
1124   HeapWord* allocate(size_t size, bool tlab);
1125   HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1126   oop       promote(oop obj, size_t obj_size);
       // par_allocate() forwards to allocate(); presumably allocate() does its
       // own synchronization — TODO confirm in the .cpp.
1127   HeapWord* par_allocate(size_t size, bool tlab) {
1128     return allocate(size, tlab);
1129   }
1130 
1131 
1132   // Used by CMSStats to track direct allocation.  The value is sampled and
1133   // reset after each young gen collection.
1134   size_t direct_allocated_words() const { return _direct_allocated_words; }
1135   void reset_direct_allocated_words()   { _direct_allocated_words = 0; }
1136 
1137   // Overrides for parallel promotion.
1138   virtual oop par_promote(int thread_num,
1139                           oop obj, markOop m, size_t word_sz);
1140   virtual void par_promote_alloc_done(int thread_num);
1141   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
1142 
1143   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
1144 
1145   // Inform this (old) generation that a promotion failure was
1146   // encountered during a collection of the young generation.
1147   virtual void promotion_failure_occurred();
1148 
1149   bool should_collect(bool full, size_t size, bool tlab);
1150   virtual bool should_concurrent_collect() const;
1151   virtual bool is_too_full() const;
1152   void collect(bool   full,
1153                bool   clear_all_soft_refs,
1154                size_t size,
1155                bool   tlab);
1156 
1157   HeapWord* expand_and_allocate(size_t word_size,
1158                                 bool tlab,
1159                                 bool parallel = false);


< prev index next >