63 private:
64 void log_heap(bool before);
65
66 public:
// Constructor: registers this log under the name "GC Heap History".
67 GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
68
// Record a heap snapshot taken before a GC (delegates to log_heap(true)).
69 void log_heap_before() {
70 log_heap(true);
71 }
// Record a heap snapshot taken after a GC (delegates to log_heap(false)).
72 void log_heap_after() {
73 log_heap(false);
74 }
75 };
76
77 //
78 // CollectedHeap
79 // SharedHeap
80 // GenCollectedHeap
81 // G1CollectedHeap
82 // ParallelScavengeHeap
83 //
84 class CollectedHeap : public CHeapObj<mtInternal> {
85 friend class VMStructs;
86 friend class IsGCActiveMark; // Block structured external access to _is_gc_active
87
88 #ifdef ASSERT
89 static int _fire_out_of_memory_count;
90 #endif
91
92 // Used for filler objects (static, but initialized in ctor).
93 static size_t _filler_array_max_size;
94
95 GCHeapLog* _gc_heap_log;
96
97 // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
98 bool _defer_initial_card_mark;
99
100 protected:
101 MemRegion _reserved;
102 BarrierSet* _barrier_set;
171 static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
172
173 // Fill with a single object (either an int array or a java.lang.Object).
174 static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
175
176 virtual void trace_heap(GCWhen::Type when, GCTracer* tracer);
177
178 // Verification functions
179 virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
180 PRODUCT_RETURN;
181 virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
182 PRODUCT_RETURN;
183 debug_only(static void check_for_valid_allocation_state();)
184
185 public:
186 enum Name {
187 Abstract,
188 SharedHeap,
189 GenCollectedHeap,
190 ParallelScavengeHeap,
191 G1CollectedHeap
192 };
193
// Accessor for the maximum filler-array size. The backing static is
// initialized in the CollectedHeap constructor (see field declaration above).
194 static inline size_t filler_array_max_size() {
195 return _filler_array_max_size;
196 }
197
// Runtime type tag for this heap; base implementation reports Abstract,
// virtual so concrete heap implementations can report their enum Name value.
198 virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
199
200 /**
201 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
202 * and JNI_OK on success.
203 */
204 virtual jint initialize() = 0;
205
206 // In many heaps, there will be a need to perform some initialization activities
207 // after the Universe is fully formed, but before general heap allocation is allowed.
208 // This is the appropriate place for such initialization methods.
209 virtual void post_initialize() = 0;
210
211 // Stop any ongoing concurrent work and prepare for exit.
588 // Iterator for all GC threads (other than VM thread)
589 virtual void gc_threads_do(ThreadClosure* tc) const = 0;
590
591 // Print any relevant tracing info that flags imply.
592 // Default implementation does nothing.
593 virtual void print_tracing_info() const = 0;
594
595 void print_heap_before_gc();
596 void print_heap_after_gc();
597
598 // Registering and unregistering an nmethod (compiled code) with the heap.
599 // Override with specific mechanism for each specialized heap type.
600 virtual void register_nmethod(nmethod* nm);
601 virtual void unregister_nmethod(nmethod* nm);
602
603 void trace_heap_before_gc(GCTracer* gc_tracer);
604 void trace_heap_after_gc(GCTracer* gc_tracer);
605
606 // Heap verification
607 virtual void verify(bool silent, VerifyOption option) = 0;
608
609 // Non product verification and debugging.
610 #ifndef PRODUCT
611 // Support for PromotionFailureALot. Return true if it's time to cause a
612 // promotion failure. The no-argument version uses
613 // this->_promotion_failure_alot_count as the counter.
614 inline bool promotion_should_fail(volatile size_t* count);
615 inline bool promotion_should_fail();
616
617 // Reset the PromotionFailureALot counters. Should be called at the end of a
618 // GC in which promotion failure occurred.
619 inline void reset_promotion_should_fail(volatile size_t* count);
620 inline void reset_promotion_should_fail();
621 #endif // #ifndef PRODUCT
622
623 #ifdef ASSERT
// Debug-only (ASSERT builds): returns nonzero once the fake-OOM counter has
// reached the CIFireOOMAt threshold; only meaningful when CIFireOOMAt > 1.
// Note the boolean result is returned through an int.
624 static int fired_fake_oom() {
625 return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
626 }
627 #endif
|
63 private:
64 void log_heap(bool before);
65
66 public:
// Constructor: registers this log under the name "GC Heap History".
67 GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
68
// Record a heap snapshot taken before a GC (delegates to log_heap(true)).
69 void log_heap_before() {
70 log_heap(true);
71 }
// Record a heap snapshot taken after a GC (delegates to log_heap(false)).
72 void log_heap_after() {
73 log_heap(false);
74 }
75 };
76
77 //
78 // CollectedHeap
79 // SharedHeap
80 // GenCollectedHeap
81 // G1CollectedHeap
82 // ParallelScavengeHeap
83 // ShenandoahHeap
84 //
85 class CollectedHeap : public CHeapObj<mtInternal> {
86 friend class VMStructs;
87 friend class IsGCActiveMark; // Block structured external access to _is_gc_active
88
89 #ifdef ASSERT
90 static int _fire_out_of_memory_count;
91 #endif
92
93 // Used for filler objects (static, but initialized in ctor).
94 static size_t _filler_array_max_size;
95
96 GCHeapLog* _gc_heap_log;
97
98 // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
99 bool _defer_initial_card_mark;
100
101 protected:
102 MemRegion _reserved;
103 BarrierSet* _barrier_set;
172 static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
173
174 // Fill with a single object (either an int array or a java.lang.Object).
175 static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
176
177 virtual void trace_heap(GCWhen::Type when, GCTracer* tracer);
178
179 // Verification functions
180 virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
181 PRODUCT_RETURN;
182 virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
183 PRODUCT_RETURN;
184 debug_only(static void check_for_valid_allocation_state();)
185
186 public:
187 enum Name {
188 Abstract,
189 SharedHeap,
190 GenCollectedHeap,
191 ParallelScavengeHeap,
192 G1CollectedHeap,
193 ShenandoahHeap
194 };
195
// Accessor for the maximum filler-array size. The backing static is
// initialized in the CollectedHeap constructor (see field declaration above).
196 static inline size_t filler_array_max_size() {
197 return _filler_array_max_size;
198 }
199
// Runtime type tag for this heap; base implementation reports Abstract,
// virtual so concrete heap implementations can report their enum Name value.
200 virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
201
202 /**
203 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
204 * and JNI_OK on success.
205 */
206 virtual jint initialize() = 0;
207
208 // In many heaps, there will be a need to perform some initialization activities
209 // after the Universe is fully formed, but before general heap allocation is allowed.
210 // This is the appropriate place for such initialization methods.
211 virtual void post_initialize() = 0;
212
213 // Stop any ongoing concurrent work and prepare for exit.
590 // Iterator for all GC threads (other than VM thread)
591 virtual void gc_threads_do(ThreadClosure* tc) const = 0;
592
593 // Print any relevant tracing info that flags imply.
594 // Default implementation does nothing.
595 virtual void print_tracing_info() const = 0;
596
597 void print_heap_before_gc();
598 void print_heap_after_gc();
599
600 // Registering and unregistering an nmethod (compiled code) with the heap.
601 // Override with specific mechanism for each specialized heap type.
602 virtual void register_nmethod(nmethod* nm);
603 virtual void unregister_nmethod(nmethod* nm);
604
605 void trace_heap_before_gc(GCTracer* gc_tracer);
606 void trace_heap_after_gc(GCTracer* gc_tracer);
607
608 // Heap verification
609 virtual void verify(bool silent, VerifyOption option) = 0;
610
611 // Shut down all GC workers and other GC related threads.
612 virtual void shutdown();
613
614 // Accumulate additional statistics from GCLABs.
615 virtual void accumulate_statistics_all_gclabs();
616
617 // Support for object pinning. This is used by JNI Get*Critical()
618 // and Release*Critical() family of functions. If supported, the GC
619 // must guarantee that pinned objects never move.
620 virtual bool supports_object_pinning() const;
621 virtual oop pin_object(JavaThread* thread, oop obj);
622 virtual void unpin_object(JavaThread* thread, oop obj);
623
624 // Non product verification and debugging.
625 #ifndef PRODUCT
626 // Support for PromotionFailureALot. Return true if it's time to cause a
627 // promotion failure. The no-argument version uses
628 // this->_promotion_failure_alot_count as the counter.
629 inline bool promotion_should_fail(volatile size_t* count);
630 inline bool promotion_should_fail();
631
632 // Reset the PromotionFailureALot counters. Should be called at the end of a
633 // GC in which promotion failure occurred.
634 inline void reset_promotion_should_fail(volatile size_t* count);
635 inline void reset_promotion_should_fail();
636 #endif // #ifndef PRODUCT
637
638 #ifdef ASSERT
// Debug-only (ASSERT builds): returns nonzero once the fake-OOM counter has
// reached the CIFireOOMAt threshold; only meaningful when CIFireOOMAt > 1.
// Note the boolean result is returned through an int.
639 static int fired_fake_oom() {
640 return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
641 }
642 #endif
|