/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shenandoah/shenandoahAffiliation.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "utilities/sizes.hpp"

class VMStructs;
class ShenandoahHeapRegionStateConstant;

class ShenandoahHeapRegion {
  friend class VMStructs;
  friend class ShenandoahHeapRegionStateConstant;
private:
  /*
    Region state is described by a state machine. Transitions are guarded by the
    heap lock, which allows changing the state of several regions atomically.
    Region states can be logically aggregated in groups.

      "Empty":
      .................................................................
      .                                                               .
      .                                                               .
      .         Uncommitted  <-------  Committed <------------------------\
      .              |                     |                          .   |
      .              \---------v-----------/                          .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Active":                |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .      /-----------------^-------------------\                  .   |
      .      |                                     |                  .   |
      .      v                                     v    "Humongous":  .   |
      .   Regular ---\-----\     ..................O................  .   |
      .     |  ^     |     |     .                 |               .  .   |
      .     |  |     |     |     .                 *---------\     .  .   |
      .     v  |     |     |     .                 v         v     .  .   |
      .    Pinned  Cset    |     .  HStart <--> H/Start   H/Cont   .  .   |
      .       ^    / |     |     .  Pinned         v         |     .  .   |
      .       |   /  |     |     .                 *<--------/     .  .   |
      .       |  v   |     |     .                 |               .  .   |
      .  CsetPinned  |     |     ..................O................  .   |
      .              |     |                       |                  .   |
      .              \-----\---v-------------------/                  .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Trash":                 |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .                        v                                      .   |
      .                      Trash ---------------------------------------/
      .                                                               .
      .                                                               .
      .................................................................

    The transition from "Empty" to "Active" is the first allocation. It can go from {Uncommitted, Committed}
    to {Regular, "Humongous"}. Subsequent allocations may happen in Regular regions too, but not in Humongous ones.

    The transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
    and from {Regular, "Humongous"} for immediate reclamation. The existence of the Trash state allows
    quick reclamation without actually cleaning up the regions.

    The transition from "Trash" to "Empty" is recycling. It cleans up the regions and the corresponding metadata,
    and can be done asynchronously and in bulk.

    Note how internal transitions disallow logic bugs:
      a) No region can go Empty, unless properly reclaimed/recycled;
      b) No region can go Uncommitted, unless reclaimed/recycled first;
      c) Only Regular regions can go to CSet;
      d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned;
      e) Pinned cannot go CSet, thus it never moves;
      f) Humongous cannot be used for regular allocations;
      g) Humongous cannot go CSet, thus it never moves;
      h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
         follow associated humongous starts, not pinnable/movable by themselves);
      i) Empty cannot go Trash, avoiding useless work;
      j) ...
   */

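  // A minimal usage sketch of one guarded state transition (illustrative only: ShenandoahHeap,
  // ShenandoahHeapLocker and get_region() exist elsewhere in Shenandoah, while should_collect()
  // is a hypothetical selection predicate):
  //
  //   ShenandoahHeap* heap = ShenandoahHeap::heap();
  //   ShenandoahHeapLocker locker(heap->lock());       // transitions are guarded by the heap lock
  //   ShenandoahHeapRegion* r = heap->get_region(idx);
  //   if (r->is_regular() && should_collect(r)) {      // only Regular regions may enter the cset (rule c)
  //     r->make_cset();                                 // Regular -> Collection Set
  //   }
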
  enum RegionState {
    _empty_uncommitted,       // region is empty and has memory uncommitted
    _empty_committed,         // region is empty and has memory committed
    _regular,                 // region is for regular allocations
    _humongous_start,         // region is the humongous start
    _humongous_cont,          // region is the humongous continuation
    _pinned_humongous_start,  // region is both humongous start and pinned
    _cset,                    // region is in collection set
    _pinned,                  // region is pinned
    _pinned_cset,             // region is pinned and in cset (evac failure path)
    _trash,                   // region contains only trash
    _REGION_STATES_NUM        // last
  };

  static const char* region_state_to_string(RegionState s) {
    switch (s) {
      case _empty_uncommitted:       return "Empty Uncommitted";
      case _empty_committed:         return "Empty Committed";
      case _regular:                 return "Regular";
      case _humongous_start:         return "Humongous Start";
      case _humongous_cont:          return "Humongous Continuation";
      case _pinned_humongous_start:  return "Humongous Start, Pinned";
      case _cset:                    return "Collection Set";
      case _pinned:                  return "Pinned";
      case _pinned_cset:             return "Collection Set, Pinned";
      case _trash:                   return "Trash";
      default:
        ShouldNotReachHere();
        return "";
    }
  }

  // This method protects from accidental changes in enum order:
  int region_state_to_ordinal(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:      return 0;
      case _empty_committed:        return 1;
      case _regular:                return 2;
      case _humongous_start:        return 3;
      case _humongous_cont:         return 4;
      case _cset:                   return 5;
      case _pinned:                 return 6;
      case _trash:                  return 7;
      case _pinned_cset:            return 8;
      case _pinned_humongous_start: return 9;
      default:
        ShouldNotReachHere();
        return -1;
    }
  }

  void report_illegal_transition(const char* method);

public:
  static int region_states_num() {
    return _REGION_STATES_NUM;
  }

  // Allowed transitions from the outside code:
  void make_regular_allocation(ShenandoahAffiliation affiliation);
  void make_young_maybe();
  void make_regular_bypass();
  void make_humongous_start();
  void make_humongous_cont();
  void make_humongous_start_bypass(ShenandoahAffiliation affiliation);
  void make_humongous_cont_bypass(ShenandoahAffiliation affiliation);
  void make_pinned();
  void make_unpinned();
  void make_cset();
  void make_trash();
  void make_trash_immediate();
  void make_empty();
  void make_uncommitted();
  void make_committed_bypass();

  // Individual states:
  bool is_empty_uncommitted()      const { return _state == _empty_uncommitted; }
  bool is_empty_committed()        const { return _state == _empty_committed; }
  bool is_regular()                const { return _state == _regular; }
  bool is_humongous_continuation() const { return _state == _humongous_cont; }

  // Participation in logical groups:
  bool is_empty()                  const { return is_empty_committed() || is_empty_uncommitted(); }
  bool is_active()                 const { return !is_empty() && !is_trash(); }
  bool is_trash()                  const { return _state == _trash; }
  bool is_humongous_start()        const { return _state == _humongous_start || _state == _pinned_humongous_start; }
  bool is_humongous()              const { return is_humongous_start() || is_humongous_continuation(); }
  bool is_committed()              const { return !is_empty_uncommitted(); }
  bool is_cset()                   const { return _state == _cset   || _state == _pinned_cset; }
  bool is_pinned()                 const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }
  inline bool is_young() const;
  inline bool is_old() const;
  inline bool is_affiliated() const;

  // Macro-properties:
  bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
  bool is_stw_move_allowed()       const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }

  RegionState state()              const { return _state; }
  int  state_ordinal()             const { return region_state_to_ordinal(_state); }

  void record_pin();
  void record_unpin();
  size_t pin_count() const;

  void clear_young_lab_flags();
  void set_young_lab_flag();
  bool has_young_lab_flag();

private:
  static size_t RegionCount;
  static size_t RegionSizeBytes;
  static size_t RegionSizeWords;
  static size_t RegionSizeBytesShift;
  static size_t RegionSizeWordsShift;
  static size_t RegionSizeBytesMask;
  static size_t RegionSizeWordsMask;
  static size_t HumongousThresholdBytes;
  static size_t HumongousThresholdWords;
  static size_t MaxTLABSizeBytes;
  static size_t MaxTLABSizeWords;

  // Never updated fields
  size_t const _index;
  HeapWord* const _bottom;
  HeapWord* const _end;

  // Rarely updated fields
  HeapWord* _new_top;
  double _empty_time;

  HeapWord* _top_before_promoted;

  // Seldom updated fields
  RegionState _state;
  HeapWord* _coalesce_and_fill_boundary; // for old regions not selected as collection set candidates.

  // Frequently updated fields
  HeapWord* _top;

  size_t _tlab_allocs;
  size_t _gclab_allocs;
  size_t _plab_allocs;

  bool _has_young_lab;

  volatile size_t _live_data;
  volatile size_t _critical_pins;

  HeapWord* volatile _update_watermark;

  uint _age;
  CENSUS_NOISE(uint _youth;)   // tracks epochs of retrograde ageing (rejuvenation)

public:
  ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed);

  static const size_t MIN_NUM_REGIONS = 10;

  // Return adjusted max heap size
  static size_t setup_sizes(size_t max_heap_size);

  double empty_time() {
    return _empty_time;
  }

  inline static size_t required_regions(size_t bytes) {
    return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
  }
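  // Worked example of the rounding above (illustrative; assumes the region size was set up as
  // 4 MB, i.e. region_size_bytes_shift() == 22): required_regions(9 * M) == 3, since a 9 MB
  // humongous allocation fills two regions completely and spills 1 MB into a third.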

  inline static size_t region_count() {
    return ShenandoahHeapRegion::RegionCount;
  }

  inline static size_t region_size_bytes() {
    return ShenandoahHeapRegion::RegionSizeBytes;
  }

  inline static size_t region_size_words() {
    return ShenandoahHeapRegion::RegionSizeWords;
  }

  inline static size_t region_size_bytes_shift() {
    return ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  inline static size_t region_size_words_shift() {
    return ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t region_size_bytes_mask() {
    return ShenandoahHeapRegion::RegionSizeBytesMask;
  }

  inline static size_t region_size_words_mask() {
    return ShenandoahHeapRegion::RegionSizeWordsMask;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytes;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWords;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t humongous_threshold_bytes() {
    return ShenandoahHeapRegion::HumongousThresholdBytes;
  }

  inline static size_t humongous_threshold_words() {
    return ShenandoahHeapRegion::HumongousThresholdWords;
  }

  inline static size_t max_tlab_size_bytes() {
    return ShenandoahHeapRegion::MaxTLABSizeBytes;
  }

  inline static size_t max_tlab_size_words() {
    return ShenandoahHeapRegion::MaxTLABSizeWords;
  }

  inline size_t index() const {
    return _index;
  }

  inline void save_top_before_promote();
  inline HeapWord* get_top_before_promote() const { return _top_before_promoted; }
  inline void restore_top_before_promote();
  inline size_t garbage_before_padded_for_promote() const;

  // Allocation (return nullptr if full)
  inline HeapWord* allocate_aligned(size_t word_size, ShenandoahAllocRequest &req, size_t alignment_in_words);

  // Allocation (return nullptr if full)
  inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest req);

  inline void clear_live_data();
  void set_live_data(size_t s);

  // Increase live data for newly allocated region
  inline void increase_live_data_alloc_words(size_t s);

  // Increase live data for region scanned with GC
  inline void increase_live_data_gc_words(size_t s);

  inline bool has_live() const;
  inline size_t get_live_data_bytes() const;
  inline size_t get_live_data_words() const;

  inline size_t garbage() const;

  void print_on(outputStream* st) const;

  void recycle();

  inline void begin_preemptible_coalesce_and_fill() {
    _coalesce_and_fill_boundary = _bottom;
  }

  inline void end_preemptible_coalesce_and_fill() {
    _coalesce_and_fill_boundary = _end;
  }

  inline void suspend_coalesce_and_fill(HeapWord* next_focus) {
    _coalesce_and_fill_boundary = next_focus;
  }

  inline HeapWord* resume_coalesce_and_fill() {
    return _coalesce_and_fill_boundary;
  }

  // Coalesce contiguous spans of garbage objects by filling headers and re-registering start locations with the
  // remembered set.  This is used by old-gen GC following concurrent marking to make old-gen HeapRegions parsable.
  // Returns true iff the region is completely coalesced and filled; returns false if cancelled before the task completes.
  bool oop_fill_and_coalesce();

  // Like oop_fill_and_coalesce(), but without honoring cancellation requests.
  bool oop_fill_and_coalesce_without_cancel();

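  // A sketch of the preemptible coalesce-and-fill protocol declared above (illustrative only; the
  // real driver lives in the old-generation GC code, and yield_to_cancellation() is a hypothetical
  // placeholder for whatever cooperation point the caller uses):
  //
  //   r->begin_preemptible_coalesce_and_fill();   // boundary starts at bottom()
  //   while (!r->oop_fill_and_coalesce()) {       // false means the task was cancelled mid-region
  //     // Progress is remembered in the coalesce-and-fill boundary, so a later attempt continues
  //     // from resume_coalesce_and_fill() instead of rescanning the whole region.
  //     yield_to_cancellation();
  //   }
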
  // During global collections, this service iterates through an old-gen heap region that is not part of the collection
  // set to fill and register ranges of dead memory.  Note that live objects were previously registered.  Some dead objects
  // that are subsumed into coalesced ranges of dead memory need to be "unregistered".
  void global_oop_iterate_and_fill_dead(OopIterateClosure* cl);
  void oop_iterate_humongous(OopIterateClosure* cl);
  void oop_iterate_humongous(OopIterateClosure* cl, HeapWord* start, size_t words);

  // Invoke closure on every reference contained within the humongous object that spans this humongous
  // region if the reference is contained within a DIRTY card and the reference is no more than words following
  // start within the humongous object.
  void oop_iterate_humongous_slice(OopIterateClosure* cl, bool dirty_only, HeapWord* start, size_t words, bool write_table);

  HeapWord* block_start(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Find humongous start region that this region belongs to
  ShenandoahHeapRegion* humongous_start_region() const;

  HeapWord* top() const         { return _top;     }
  void set_top(HeapWord* v)     { _top = v;        }

  HeapWord* new_top() const     { return _new_top; }
  void set_new_top(HeapWord* v) { _new_top = v;    }

  HeapWord* bottom() const      { return _bottom;  }
  HeapWord* end() const         { return _end;     }

  size_t capacity() const       { return byte_size(bottom(), end()); }
  size_t used() const           { return byte_size(bottom(), top()); }
  size_t used_before_promote() const { return byte_size(bottom(), get_top_before_promote()); }
  size_t free() const           { return byte_size(top(),    end()); }

  // Does this region contain this address?
  bool contains(HeapWord* p) const {
    return (bottom() <= p) && (p < top());
  }
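  // Note the invariant behind the accessors above: bottom() <= top() <= end(), so
  // used() + free() == capacity(). For example, a freshly recycled region has top() == bottom(),
  // hence used() == 0 and free() == capacity().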

  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
  void reset_alloc_metadata();
  size_t get_shared_allocs() const;
  size_t get_tlab_allocs() const;
  size_t get_gclab_allocs() const;
  size_t get_plab_allocs() const;

  inline HeapWord* get_update_watermark() const;
  inline void set_update_watermark(HeapWord* w);
  inline void set_update_watermark_at_safepoint(HeapWord* w);

  inline ShenandoahAffiliation affiliation() const;
  inline const char* affiliation_name() const;

  void set_affiliation(ShenandoahAffiliation new_affiliation);

  // Region ageing and rejuvenation
  uint age() { return _age; }
  CENSUS_NOISE(uint youth() { return _youth; })

  void increment_age() {
    const uint max_age = markWord::max_age;
    assert(_age <= max_age, "Error");
    if (_age++ >= max_age) {
      _age = max_age;   // clamp
    }
  }

  void reset_age() {
    CENSUS_NOISE(_youth += _age;)
    _age = 0;
  }

  CENSUS_NOISE(void clear_youth() { _youth = 0; })

  // Register all objects.  Set all remembered set cards to dirty.
  void promote_humongous();
  void promote_in_place();

private:
  void decrement_humongous_waste() const;
  void do_commit();
  void do_uncommit();

  // This is an old region that was not part of the collection set during a GLOBAL collection.  We coalesce the dead
  // objects, but do not need to register the live objects as they are already registered.
  void global_oop_iterate_objects_and_fill_dead(OopIterateClosure* cl);

  inline void internal_increase_live_data(size_t s);

  void set_state(RegionState to);
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP