/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shenandoah/shenandoahAffiliation.hpp"
#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "utilities/sizes.hpp"

class VMStructs;
class ShenandoahHeapRegionStateConstant;

class ShenandoahHeapRegion {
  friend class VMStructs;
  friend class ShenandoahHeapRegionStateConstant;
private:
  /*
    Region state is described by a state machine. Transitions are guarded by
    heap lock, which allows changing the state of several regions atomically.
    Region states can be logically aggregated in groups.

      "Empty":
      .................................................................
      .                                                               .
      .                                                               .
      .         Uncommitted  <-------  Committed <------------------------\
      .              |                     |                          .   |
      .              \---------v-----------/                          .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Active":                |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .      /-----------------^-------------------\                  .   |
      .      |                                     |                  .   |
      .      v                                     v    "Humongous":  .   |
      .   Regular ---\-----\     ..................O................  .   |
      .     |  ^     |     |     .                 |               .  .   |
      .     |  |     |     |     .                 *---------\     .  .   |
      .     v  |     |     |     .                 v         v     .  .   |
      .    Pinned  Cset    |     .  HStart <--> H/Start   H/Cont   .  .   |
      .       ^    / |     |     .  Pinned         v         |     .  .   |
      .       |   /  |     |     .                 *<--------/     .  .   |
      .       |  v   |     |     .                 |               .  .   |
      .  CsetPinned  |     |     ..................O................  .   |
      .              |     |                       |                  .   |
      .              \-----\---v-------------------/                  .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Trash":                 |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .                        v                                      .   |
      .                      Trash ---------------------------------------/
      .                                                               .
      .                                                               .
      .................................................................

    Transition from "Empty" to "Active" is the first allocation. It can go from {Uncommitted, Committed}
    to {Regular, "Humongous"}. Subsequent allocations may also happen in already-active Regular regions,
    but never directly in Humongous regions.

    Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
    and from {Regular, "Humongous"} for immediate reclamation. The existence of Trash state allows
    quick reclamation without actual cleaning up.

    Transition from "Trash" to "Empty" is recycling. It cleans up the regions and corresponding metadata.
    Can be done asynchronously and in bulk.

    Note how internal transitions disallow logic bugs:
      a) No region can go Empty, unless properly reclaimed/recycled;
      b) No region can go Uncommitted, unless reclaimed/recycled first;
      c) Only Regular regions can go to CSet;
      d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned;
      e) Pinned cannot go CSet, thus it never moves;
      f) Humongous cannot be used for regular allocations;
      g) Humongous cannot go CSet, thus it never moves;
      h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
         follow associated humongous starts, not pinnable/movable by themselves);
      i) Empty cannot go Trash, avoiding useless work;
      j) ...
   */

  enum RegionState {
    _empty_uncommitted,       // region is empty and has memory uncommitted
    _empty_committed,         // region is empty and has memory committed
    _regular,                 // region is for regular allocations
    _humongous_start,         // region is the humongous start
    _humongous_cont,          // region is the humongous continuation
    _pinned_humongous_start,  // region is both humongous start and pinned
    _cset,                    // region is in collection set
    _pinned,                  // region is pinned
    _pinned_cset,             // region is pinned and in cset (evac failure path)
    _trash,                   // region contains only trash
    _REGION_STATES_NUM        // last
  };

public:
  static const char* region_state_to_string(RegionState s) {
    switch (s) {
      case _empty_uncommitted:       return "Empty Uncommitted";
      case _empty_committed:         return "Empty Committed";
      case _regular:                 return "Regular";
      case _humongous_start:         return "Humongous Start";
      case _humongous_cont:          return "Humongous Continuation";
      case _pinned_humongous_start:  return "Humongous Start, Pinned";
      case _cset:                    return "Collection Set";
      case _pinned:                  return "Pinned";
      case _pinned_cset:             return "Collection Set, Pinned";
      case _trash:                   return "Trash";
      default:
        ShouldNotReachHere();
        return "";
    }
  }

private:
  // This method protects from accidental changes in enum order:
  int region_state_to_ordinal(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:      return 0;
      case _empty_committed:        return 1;
      case _regular:                return 2;
      case _humongous_start:        return 3;
      case _humongous_cont:         return 4;
      case _cset:                   return 5;
      case _pinned:                 return 6;
      case _trash:                  return 7;
      case _pinned_cset:            return 8;
      case _pinned_humongous_start: return 9;
      default:
        ShouldNotReachHere();
        return -1;
    }
  }

  void report_illegal_transition(const char* method);

public:
  static int region_states_num() {
    return _REGION_STATES_NUM;
  }

  // Allowed transitions from the outside code:
  void make_regular_allocation(ShenandoahAffiliation affiliation);
  void make_affiliated_maybe();
  void make_regular_bypass();
  void make_humongous_start();
  void make_humongous_cont();
  void make_humongous_start_bypass(ShenandoahAffiliation affiliation);
  void make_humongous_cont_bypass(ShenandoahAffiliation affiliation);
  void make_pinned();
  void make_unpinned();
  void make_cset();
  void make_trash();
  void make_trash_immediate();
  void make_empty();
  void make_uncommitted();
  void make_committed_bypass();
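
  // A minimal usage sketch (illustrative, not an actual call site): transitions are
  // performed under the heap lock, so several regions can change state atomically.
  // ShenandoahHeapLocker and ShenandoahHeap::lock() are assumed here to match their
  // use elsewhere in Shenandoah; the affiliation value is only an example.
  //
  //   {
  //     ShenandoahHeapLocker locker(ShenandoahHeap::heap()->lock());
  //     region->make_regular_allocation(YOUNG_GENERATION);
  //     other_region->make_trash();
  //   }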

  // Individual states:
  bool is_empty_uncommitted()      const { return _state == _empty_uncommitted; }
  bool is_empty_committed()        const { return _state == _empty_committed; }
  bool is_regular()                const { return _state == _regular; }
  bool is_humongous_continuation() const { return _state == _humongous_cont; }

  // Participation in logical groups:
  bool is_empty()                  const { return is_empty_committed() || is_empty_uncommitted(); }
  bool is_active()                 const { return !is_empty() && !is_trash(); }
  bool is_trash()                  const { return _state == _trash; }
  bool is_humongous_start()        const { return _state == _humongous_start || _state == _pinned_humongous_start; }
  bool is_humongous()              const { return is_humongous_start() || is_humongous_continuation(); }
  bool is_committed()              const { return !is_empty_uncommitted(); }
  bool is_cset()                   const { return _state == _cset   || _state == _pinned_cset; }
  bool is_pinned()                 const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }
  bool is_regular_pinned()         const { return _state == _pinned; }

  inline bool is_young() const;
  inline bool is_old() const;
  inline bool is_affiliated() const;

  // Macro-properties:
  bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
  bool is_stw_move_allowed()       const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }

  RegionState state()              const { return _state; }
  int  state_ordinal()             const { return region_state_to_ordinal(_state); }

  void record_pin();
  void record_unpin();
  size_t pin_count() const;
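
  // A sketch of how these compose (illustrative; the real reconciliation lives in
  // ShenandoahHeap when pinned-region status is synchronized at a safepoint).
  // record_pin()/record_unpin() maintain the critical-pin counter; the state machine
  // is brought in line with that counter separately, under the heap lock:
  //
  //   region->record_pin();                  // e.g. object handed to a JNI critical section
  //   ...
  //   region->record_unpin();
  //
  //   // Later, under the heap lock:
  //   if (region->pin_count() > 0 && !region->is_pinned()) {
  //     region->make_pinned();
  //   } else if (region->pin_count() == 0 && region->is_pinned()) {
  //     region->make_unpinned();
  //   }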

private:
  static size_t RegionCount;
  static size_t RegionSizeBytes;
  static size_t RegionSizeWords;
  static size_t RegionSizeBytesShift;
  static size_t RegionSizeWordsShift;
  static size_t RegionSizeBytesMask;
  static size_t RegionSizeWordsMask;
  static size_t MaxTLABSizeBytes;
  static size_t MaxTLABSizeWords;

  // Never updated fields
  size_t const _index;
  HeapWord* const _bottom;
  HeapWord* const _end;

  // Rarely updated fields
  HeapWord* _new_top;
  double _empty_time;

  HeapWord* _top_before_promoted;

  // Seldom updated fields
  RegionState _state;
  HeapWord* _coalesce_and_fill_boundary; // for old regions not selected as collection set candidates.

  // Frequently updated fields
  HeapWord* _top;

  size_t _tlab_allocs;
  size_t _gclab_allocs;
  size_t _plab_allocs;

  volatile size_t _live_data;
  volatile size_t _critical_pins;

  HeapWord* volatile _update_watermark;

  uint _age;
  CENSUS_NOISE(uint _youth;)   // tracks epochs of retrograde ageing (rejuvenation)

public:
  ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed);

  static const size_t MIN_NUM_REGIONS = 10;

  // Return adjusted max heap size
  static size_t setup_sizes(size_t max_heap_size);

  double empty_time() {
    return _empty_time;
  }

  inline static size_t required_regions(size_t bytes) {
    return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
  }
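
  // A worked example (sketch; the 4 MB region size is only an assumption here):
  // with region_size_bytes() == 4*M and region_size_bytes_shift() == 22, a 10 MB
  // humongous allocation needs
  //   required_regions(10*M) == (10*M + 4*M - 1) >> 22 == 3
  // regions, i.e. the byte size rounded up to whole regions.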

  inline static bool requires_humongous(size_t words) {
    return words > ShenandoahHeapRegion::RegionSizeWords;
  }

  inline static size_t region_count() {
    return ShenandoahHeapRegion::RegionCount;
  }

  inline static size_t region_size_bytes() {
    return ShenandoahHeapRegion::RegionSizeBytes;
  }

  inline static size_t region_size_words() {
    return ShenandoahHeapRegion::RegionSizeWords;
  }

  inline static size_t region_size_bytes_shift() {
    return ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  inline static size_t region_size_words_shift() {
    return ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t region_size_bytes_mask() {
    return ShenandoahHeapRegion::RegionSizeBytesMask;
  }

  inline static size_t region_size_words_mask() {
    return ShenandoahHeapRegion::RegionSizeWordsMask;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytes;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWords;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t max_tlab_size_bytes() {
    return ShenandoahHeapRegion::MaxTLABSizeBytes;
  }

  inline static size_t max_tlab_size_words() {
    return ShenandoahHeapRegion::MaxTLABSizeWords;
  }

  inline size_t index() const {
    return _index;
  }

  inline void save_top_before_promote();
  inline HeapWord* get_top_before_promote() const { return _top_before_promoted; }
  inline void restore_top_before_promote();
  inline size_t garbage_before_padded_for_promote() const;

  // If the next available memory is not aligned on an address that is a multiple of alignment_in_bytes, fill
  // the empty space so that the returned object is aligned on an address that is a multiple of
  // alignment_in_bytes.  The requested size is in words.  It is assumed that this->is_old().  A pad object is
  // allocated, filled, and registered if necessary to ensure the new allocation is properly aligned.  Returns
  // nullptr if memory is not available.
  inline HeapWord* allocate_aligned(size_t word_size, ShenandoahAllocRequest &req, size_t alignment_in_bytes);
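
  // A worked example (sketch; the 512-byte alignment and the filler size are only
  // illustrative): if alignment_in_bytes == 512 and the next free word is 24 bytes
  // short of the next 512-byte boundary, a 3-word (24-byte) pad object is allocated
  // and registered first, and the object returned for the request then starts
  // exactly on the 512-byte boundary.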

  // Allocation (return nullptr if full)
  inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest req);

  inline void clear_live_data();
  void set_live_data(size_t s);

  // Increase live data for newly allocated region
  inline void increase_live_data_alloc_words(size_t s);

  // Increase live data for region scanned with GC
  inline void increase_live_data_gc_words(size_t s);

  inline bool has_live() const;
  inline size_t get_live_data_bytes() const;
  inline size_t get_live_data_words() const;

  inline size_t garbage() const;

  void print_on(outputStream* st) const;

  void recycle();

  inline void begin_preemptible_coalesce_and_fill() {
    _coalesce_and_fill_boundary = _bottom;
  }

  inline void end_preemptible_coalesce_and_fill() {
    _coalesce_and_fill_boundary = _end;
  }

  inline void suspend_coalesce_and_fill(HeapWord* next_focus) {
    _coalesce_and_fill_boundary = next_focus;
  }

  inline HeapWord* resume_coalesce_and_fill() {
    return _coalesce_and_fill_boundary;
  }

  // Coalesce contiguous spans of garbage objects by filling headers and registering start locations with the
  // remembered set. This is used by old-gen GC following concurrent marking to make old-gen HeapRegions parsable.
  // Old regions must be parsable because the mark bitmap is not reliable during the concurrent old mark.
  // Returns true iff the region is completely coalesced and filled. Returns false if cancelled before the task is
  // complete.
  bool oop_coalesce_and_fill(bool cancellable);
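
  // A sketch of the preemptible protocol (illustrative; the actual driver lives in
  // the old-generation coalesce-and-fill task, and oop_coalesce_and_fill() itself
  // records its progress via suspend_coalesce_and_fill() when it yields):
  //
  //   r->begin_preemptible_coalesce_and_fill();       // start from bottom()
  //   while (!r->oop_coalesce_and_fill(true /* cancellable */)) {
  //     // Cancelled mid-region: progress is remembered in the boundary, so a
  //     // later retry continues from resume_coalesce_and_fill() rather than
  //     // re-scanning the whole region.
  //   }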

  // Invoke the closure on every reference contained within the humongous object that spans this humongous
  // region, if the reference lies within a DIRTY card and within the slice [start, start + words) of the
  // humongous object.
  void oop_iterate_humongous_slice_dirty(OopIterateClosure* cl, HeapWord* start, size_t words, bool write_table) const;

  // Invoke the closure on every reference contained within the humongous object starting from start and
  // ending at start + words.
  void oop_iterate_humongous_slice_all(OopIterateClosure* cl, HeapWord* start, size_t words) const;

  HeapWord* block_start(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Find humongous start region that this region belongs to
  ShenandoahHeapRegion* humongous_start_region() const;

  HeapWord* top() const         { return _top;     }
  void set_top(HeapWord* v)     { _top = v;        }

  HeapWord* new_top() const     { return _new_top; }
  void set_new_top(HeapWord* v) { _new_top = v;    }

  HeapWord* bottom() const      { return _bottom;  }
  HeapWord* end() const         { return _end;     }

  size_t capacity() const       { return byte_size(bottom(), end()); }
  size_t used() const           { return byte_size(bottom(), top()); }
  size_t used_before_promote() const { return byte_size(bottom(), get_top_before_promote()); }
  size_t free() const           { return byte_size(top(),    end()); }

  // Does this region contain this address?
  bool contains(HeapWord* p) const {
    return (bottom() <= p) && (p < top());
  }

  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
  void reset_alloc_metadata();
  size_t get_shared_allocs() const;
  size_t get_tlab_allocs() const;
  size_t get_gclab_allocs() const;
  size_t get_plab_allocs() const;

  inline HeapWord* get_update_watermark() const;
  inline void set_update_watermark(HeapWord* w);
  inline void set_update_watermark_at_safepoint(HeapWord* w);

  inline ShenandoahAffiliation affiliation() const;
  inline const char* affiliation_name() const;

  void set_affiliation(ShenandoahAffiliation new_affiliation);

  // Region ageing and rejuvenation
  uint age() const { return _age; }
  CENSUS_NOISE(uint youth() const { return _youth; })

  void increment_age() {
    const uint max_age = markWord::max_age;
    assert(_age <= max_age, "Error");
    if (_age++ >= max_age) {
      _age = max_age;   // clamp
    }
  }

  void reset_age() {
    CENSUS_NOISE(_youth += _age;)
    _age = 0;
  }

  CENSUS_NOISE(void clear_youth() { _youth = 0; })
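
  // A sketch of how ageing is intended to be used (illustrative; the actual
  // tenuring decision lives in the generational heuristics and ShenandoahAgeCensus,
  // and tenuring_threshold below is only an assumed variable): each young collection
  // a region survives bumps its age, and once the age reaches the threshold its live
  // objects become promotion candidates.
  //
  //   if (r->is_young() && r->age() >= tenuring_threshold) {
  //     // promote region contents (or the region in place) to old
  //   }
  //   r->reset_age();   // e.g. on recycle or after promotion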

private:
  void decrement_humongous_waste() const;
  void do_commit();
  void do_uncommit();

  inline void internal_increase_live_data(size_t s);

  void set_state(RegionState to);
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP