
src/hotspot/share/gc/shared/collectedHeap.hpp


  // Create a new TLAB. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The size actually allocated is
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);
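
A minimal sketch of this contract, assuming a hypothetical subclass MyHeap and a hypothetical attempt_allocation() helper: an override may return any size between min_size and requested_size, and must report the size it actually handed out through actual_size.

HeapWord* MyHeap::allocate_new_tlab(size_t min_size,
                                    size_t requested_size,
                                    size_t* actual_size) {
  size_t size = requested_size;
  while (true) {
    HeapWord* mem = attempt_allocation(size);   // hypothetical helper
    if (mem != nullptr) {
      *actual_size = size;   // report the size actually allocated
      return mem;
    }
    if (size == min_size) {
      return nullptr;        // caller falls back to a non-TLAB allocation
    }
    size = MAX2(size / 2, min_size);  // retry with a smaller TLAB
  }
}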

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs,
  // only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
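
A sketch of a typical caller, assuming a hypothetical slow-path method in a CollectedHeap subclass; the error handling is only indicated by a comment:

HeapWord* MyHeap::allocate_object_slow(size_t size) {
  bool gc_overhead_limit_was_exceeded = false;
  HeapWord* mem = mem_allocate(size, &gc_overhead_limit_was_exceeded);
  if (mem == nullptr && gc_overhead_limit_was_exceeded) {
    // The collector spent too much time reclaiming too little memory;
    // callers normally surface this as a "GC overhead limit exceeded"
    // OutOfMemoryError.
  }
  return mem;
}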

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();

  static inline void zap_filler_array_with(HeapWord* start, size_t words, juint value);
  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
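
For illustration, a hypothetical internal helper that enforces the documented bounds before delegating; it assumes a context with access to these non-public methods and to filler_array_max_size():

void fill_region_with_one_array(HeapWord* start, size_t words) {
  assert(words >= CollectedHeap::filler_array_min_size(), "too small for a filler array");
  assert(words <= CollectedHeap::filler_array_max_size(), "too large for a single array");
  CollectedHeap::fill_with_array(start, words, /* zap */ true);
}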

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

  // ...

  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
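
A sketch of typical use when a range of the heap stops being used but must remain walkable; retire_buffer() is illustrative, and access to filler_array_max_size() is assumed:

void retire_buffer(HeapWord* top, HeapWord* end) {
  size_t words = pointer_delta(end, top);
  if (words == 0) {
    return;  // nothing to fill
  }
  // The caller guarantees words >= min_fill_size(), e.g. by keeping an
  // alignment reserve (see lab_alignment_reserve() below).
  if (words <= CollectedHeap::filler_array_max_size()) {
    CollectedHeap::fill_with_object(top, end);     // one filler object
  } else {
    CollectedHeap::fill_with_objects(top, words);  // may use several objects
  }
}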

  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
  static constexpr size_t min_dummy_object_size() {
    return oopDesc::header_size();
  }

  static size_t lab_alignment_reserve() {
    assert(_lab_alignment_reserve != ~(size_t)0, "uninitialized");
    return _lab_alignment_reserve;
  }
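
The reserve and the dummy-object minimum work together when a LAB is retired: holding back lab_alignment_reserve() words when sizing the LAB guarantees the unused tail can always be covered by a filler. A minimal sketch, with MyLab and its fields purely illustrative:

void MyLab::initialize(HeapWord* start, size_t words) {
  _start = start;
  _top   = start;
  // Keep the reserve out of the usable range so retirement always has
  // room for at least min_dummy_object_size() words of filler.
  _end   = start + words - CollectedHeap::lab_alignment_reserve();
}

void MyLab::retire(CollectedHeap* heap) {
  HeapWord* hard_end = _end + CollectedHeap::lab_alignment_reserve();
  if (_top < hard_end) {
    heap->fill_with_dummy_object(_top, hard_end, /* zap */ true);
  }
}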

  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for the efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See the implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
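
The declaration itself falls outside this excerpt. A sketch of a conforming override, with MyHeap and flush_deferred_buffers() hypothetical and the bool retire_tlabs parameter assumed from the CollectedHeap sources:

void MyHeap::ensure_parsability(bool retire_tlabs) {
  // The "super" call: let CollectedHeap retire and fill TLABs first.
  CollectedHeap::ensure_parsability(retire_tlabs);
  // Collector-specific work, e.g. flushing per-thread buffers so that
  // every heap word is again part of some parsable object.
  flush_deferred_buffers();
}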
