< prev index next >

src/hotspot/share/gc/shared/collectedHeap.hpp

Print this page




  72   void log_heap(CollectedHeap* heap, bool before);
  73 
  74  public:
       // Construct a heap-history event log titled "GC Heap History".
  75   GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
  76 
       // Record a snapshot of the heap state taken before a collection.
  77   void log_heap_before(CollectedHeap* heap) {
  78     log_heap(heap, true);
  79   }
       // Record a snapshot of the heap state taken after a collection.
  80   void log_heap_after(CollectedHeap* heap) {
  81     log_heap(heap, false);
  82   }
  83 };
  84 
  85 //
  86 // CollectedHeap
  87 //   GenCollectedHeap
  88 //     SerialHeap
  89 //     CMSHeap
  90 //   G1CollectedHeap
  91 //   ParallelScavengeHeap

  92 //   ZCollectedHeap
  93 //
  94 class CollectedHeap : public CHeapObj<mtInternal> {
  95   friend class VMStructs;
  96   friend class JVMCIVMStructs;
  97   friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  98   friend class MemAllocator;
  99 
 100  private:
 101 #ifdef ASSERT
 102   static int       _fire_out_of_memory_count;
 103 #endif
 104 
 105   GCHeapLog* _gc_heap_log;
 106 
 107   MemRegion _reserved;
 108 
 109  protected:
 110   bool _is_gc_active;
 111 


 161   static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
 162 
 163   // Fill with a single object (either an int array or a java.lang.Object).
 164   static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
 165 
 166   virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 167 
 168   // Verification functions
 169   virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
 170     PRODUCT_RETURN;
 171   debug_only(static void check_for_valid_allocation_state();)
 172 
 173  public:
 174   enum Name {
 175     None,
 176     Serial,
 177     Parallel,
 178     CMS,
 179     G1,
 180     Epsilon,
 181     Z

 182   };
 183 
        // Upper bound, in words, on a single filler array object.
  184   static inline size_t filler_array_max_size() {
  185     return _filler_array_max_size;
  186   }
 187 


 188   virtual Name kind() const = 0;
 189 
 190   virtual const char* name() const = 0;
 191 
 192   /**
 193    * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
 194    * and JNI_OK on success.
 195    */
 196   virtual jint initialize() = 0;
 197 
 198   // In many heaps, there will be a need to perform some initialization activities
 199   // after the Universe is fully formed, but before general heap allocation is allowed.
 200   // This is the correct place to place such initialization methods.
 201   virtual void post_initialize();
 202 
  203   // Stop any ongoing concurrent work and prepare for exit.
  204   virtual void stop() {}
 205 
  206   // Stop and resume concurrent GC threads interfering with safepoint operations
        // (no-op by default; concurrent collectors override this).
  207   virtual void safepoint_synchronize_begin() {}


 271     return is_in_reserved(p);
 272   }
 273 
        // Same as is_in_closed_subset(), but also accepts NULL.
  274   bool is_in_closed_subset_or_null(const void* p) const {
  275     return p == NULL || is_in_closed_subset(p);
  276   }
 277 
        // Record the cause of the current GC; when performance counters are
        // enabled, also publish the previous and new causes to the perf data.
  278   void set_gc_cause(GCCause::Cause v) {
  279      if (UsePerfData) {
  280        _gc_lastcause = _gc_cause;
  281        _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
  282        _perf_gc_cause->set_value(GCCause::to_string(v));
  283      }
  284     _gc_cause = v;
  285   }
        // The cause of the current (or most recently recorded) GC.
  286   GCCause::Cause gc_cause() { return _gc_cause; }
 287 
 288   virtual oop obj_allocate(Klass* klass, int size, TRAPS);
 289   virtual oop array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS);
 290   virtual oop class_allocate(Klass* klass, int size, TRAPS);






 291 
 292   // Utilities for turning raw memory into filler objects.
 293   //
 294   // min_fill_size() is the smallest region that can be filled.
 295   // fill_with_objects() can fill arbitrary-sized regions of the heap using
 296   // multiple objects.  fill_with_object() is for regions known to be smaller
 297   // than the largest array of integers; it uses a single object to fill the
 298   // region and has slightly less overhead.
        // Smallest fillable region: one aligned object header.
  299   static size_t min_fill_size() {
  300     return size_t(align_object_size(oopDesc::header_size()));
  301   }
 302 
 303   static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
 304 
 305   static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
        // Convenience overload: fill an entire MemRegion with one object.
  306   static void fill_with_object(MemRegion region, bool zap = true) {
  307     fill_with_object(region.start(), region.word_size(), zap);
  308   }
 309   static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
 310     fill_with_object(start, pointer_delta(end, start), zap);




  72   void log_heap(CollectedHeap* heap, bool before);
  73 
  74  public:
       // Construct a heap-history event log titled "GC Heap History".
  75   GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
  76 
       // Record a snapshot of the heap state taken before a collection.
  77   void log_heap_before(CollectedHeap* heap) {
  78     log_heap(heap, true);
  79   }
       // Record a snapshot of the heap state taken after a collection.
  80   void log_heap_after(CollectedHeap* heap) {
  81     log_heap(heap, false);
  82   }
  83 };
  84 
  85 //
  86 // CollectedHeap
  87 //   GenCollectedHeap
  88 //     SerialHeap
  89 //     CMSHeap
  90 //   G1CollectedHeap
  91 //   ParallelScavengeHeap
  92 //   ShenandoahHeap
  93 //   ZCollectedHeap
  94 //
  95 class CollectedHeap : public CHeapObj<mtInternal> {
  96   friend class VMStructs;
  97   friend class JVMCIVMStructs;
  98   friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  99   friend class MemAllocator;
 100 
 101  private:
 102 #ifdef ASSERT
 103   static int       _fire_out_of_memory_count;
 104 #endif
 105 
 106   GCHeapLog* _gc_heap_log;
 107 
 108   MemRegion _reserved;
 109 
 110  protected:
 111   bool _is_gc_active;
 112 


 162   static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
 163 
 164   // Fill with a single object (either an int array or a java.lang.Object).
 165   static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
 166 
 167   virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 168 
 169   // Verification functions
 170   virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
 171     PRODUCT_RETURN;
 172   debug_only(static void check_for_valid_allocation_state();)
 173 
 174  public:
 175   enum Name {
 176     None,
 177     Serial,
 178     Parallel,
 179     CMS,
 180     G1,
 181     Epsilon,
 182     Z,
 183     Shenandoah
 184   };
 185 
        // Upper bound, in words, on a single filler array object.
  186   static inline size_t filler_array_max_size() {
  187     return _filler_array_max_size;
  188   }
 189 
 190   virtual HeapWord* tlab_post_allocation_setup(HeapWord* obj);
 191 
 192   virtual Name kind() const = 0;
 193 
 194   virtual const char* name() const = 0;
 195 
 196   /**
 197    * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
 198    * and JNI_OK on success.
 199    */
 200   virtual jint initialize() = 0;
 201 
 202   // In many heaps, there will be a need to perform some initialization activities
 203   // after the Universe is fully formed, but before general heap allocation is allowed.
 204   // This is the correct place to place such initialization methods.
 205   virtual void post_initialize();
 206 
  207   // Stop any ongoing concurrent work and prepare for exit.
  208   virtual void stop() {}
 209 
  210   // Stop and resume concurrent GC threads interfering with safepoint operations
        // (no-op by default; concurrent collectors override this).
  211   virtual void safepoint_synchronize_begin() {}


 275     return is_in_reserved(p);
 276   }
 277 
        // Same as is_in_closed_subset(), but also accepts NULL.
  278   bool is_in_closed_subset_or_null(const void* p) const {
  279     return p == NULL || is_in_closed_subset(p);
  280   }
 281 
        // Record the cause of the current GC; when performance counters are
        // enabled, also publish the previous and new causes to the perf data.
  282   void set_gc_cause(GCCause::Cause v) {
  283      if (UsePerfData) {
  284        _gc_lastcause = _gc_cause;
  285        _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
  286        _perf_gc_cause->set_value(GCCause::to_string(v));
  287      }
  288     _gc_cause = v;
  289   }
        // The cause of the current (or most recently recorded) GC.
  290   GCCause::Cause gc_cause() { return _gc_cause; }
 291 
 292   virtual oop obj_allocate(Klass* klass, int size, TRAPS);
 293   virtual oop array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS);
 294   virtual oop class_allocate(Klass* klass, int size, TRAPS);
 295 
 296   virtual uint oop_extra_words();
 297 
 298 #ifndef CC_INTERP
 299   virtual void compile_prepare_oop(MacroAssembler* masm, Register obj);
 300 #endif
 301 
 302   // Utilities for turning raw memory into filler objects.
 303   //
 304   // min_fill_size() is the smallest region that can be filled.
 305   // fill_with_objects() can fill arbitrary-sized regions of the heap using
 306   // multiple objects.  fill_with_object() is for regions known to be smaller
 307   // than the largest array of integers; it uses a single object to fill the
 308   // region and has slightly less overhead.
        // Smallest fillable region: one aligned object header.
  309   static size_t min_fill_size() {
  310     return size_t(align_object_size(oopDesc::header_size()));
  311   }
 312 
 313   static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
 314 
 315   static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
        // Convenience overload: fill an entire MemRegion with one object.
  316   static void fill_with_object(MemRegion region, bool zap = true) {
  317     fill_with_object(region.start(), region.word_size(), zap);
  318   }
 319   static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
 320     fill_with_object(start, pointer_delta(end, start), zap);


< prev index next >