1 /*
  2  * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
 27 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
 28 
 29 #include "gc/shared/gc_globals.hpp"
 30 #include "gc/shared/spaceDecorator.hpp"
 31 #include "gc/shenandoah/shenandoahAffiliation.hpp"
 32 #include "gc/shenandoah/shenandoahAgeCensus.hpp"
 33 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
 34 #include "gc/shenandoah/shenandoahAsserts.hpp"
 35 #include "gc/shenandoah/shenandoahHeap.hpp"
 36 #include "gc/shenandoah/shenandoahPadding.hpp"
 37 #include "utilities/sizes.hpp"
 38 
 39 class VMStructs;
 40 class ShenandoahHeapRegionStateConstant;
 41 
// A ShenandoahHeapRegion is the unit of heap management in Shenandoah: the heap
// is carved into equally-sized regions whose geometry (size, shifts, masks) is
// chosen once by setup_sizes() and cached in the static members below. Each
// region tracks its own lifecycle state (see the state machine diagram),
// allocation pointers, pin count, age, and live-data accounting. All state
// transitions are guarded by the heap lock.
class ShenandoahHeapRegion {
  friend class VMStructs;                        // serviceability agent reads private fields by offset
  friend class ShenandoahHeapRegionStateConstant;
private:
  /*
    Region state is described by a state machine. Transitions are guarded by
    heap lock, which allows changing the state of several regions atomically.
    Region states can be logically aggregated in groups.

      "Empty":
      .................................................................
      .                                                               .
      .                                                               .
      .         Uncommitted  <-------  Committed <------------------------\
      .              |                     |                          .   |
      .              \---------v-----------/                          .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Active":                |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .      /-----------------^-------------------\                  .   |
      .      |                                     |                  .   |
      .      v                                     v    "Humongous":  .   |
      .   Regular ---\-----\     ..................O................  .   |
      .     |  ^     |     |     .                 |               .  .   |
      .     |  |     |     |     .                 *---------\     .  .   |
      .     v  |     |     |     .                 v         v     .  .   |
      .    Pinned  Cset    |     .  HStart <--> H/Start   H/Cont   .  .   |
      .       ^    / |     |     .  Pinned         v         |     .  .   |
      .       |   /  |     |     .                 *<--------/     .  .   |
      .       |  v   |     |     .                 |               .  .   |
      .  CsetPinned  |     |     ..................O................  .   |
      .              |     |                       |                  .   |
      .              \-----\---v-------------------/                  .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Trash":                 |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .                        v                                      .   |
      .                      Trash ---------------------------------------/
      .                                                               .
      .                                                               .
      .................................................................

    Transition from "Empty" to "Active" is first allocation. It can go from {Uncommitted, Committed}
    to {Regular, "Humongous"}. The allocation may happen in Regular regions too, but not in Humongous.

    Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
    and from {Regular, "Humongous"} for immediate reclamation. The existence of Trash state allows
    quick reclamation without actual cleaning up.

    Transition from "Trash" to "Empty" is recycling. It cleans up the regions and corresponding metadata.
    Can be done asynchronously and in bulk.

    Note how internal transitions disallow logic bugs:
      a) No region can go Empty, unless properly reclaimed/recycled;
      b) No region can go Uncommitted, unless reclaimed/recycled first;
      c) Only Regular regions can go to CSet;
      d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned;
      e) Pinned cannot go CSet, thus it never moves;
      f) Humongous cannot be used for regular allocations;
      g) Humongous cannot go CSet, thus it never moves;
      h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
         follow associated humongous starts, not pinnable/movable by themselves);
      i) Empty cannot go Trash, avoiding useless work;
      j) ...
   */

  // NOTE: declaration order here is NOT an external contract; stable per-state
  // ordinals are defined by region_state_to_ordinal() below. Keep the two in
  // sync when adding states.
  enum RegionState {
    _empty_uncommitted,       // region is empty and has memory uncommitted
    _empty_committed,         // region is empty and has memory committed
    _regular,                 // region is for regular allocations
    _humongous_start,         // region is the humongous start
    _humongous_cont,          // region is the humongous continuation
    _pinned_humongous_start,  // region is both humongous start and pinned
    _cset,                    // region is in collection set
    _pinned,                  // region is pinned
    _pinned_cset,             // region is pinned and in cset (evac failure path)
    _trash,                   // region contains only trash
    _REGION_STATES_NUM        // last
  };

public:
  // Human-readable name for a region state, e.g. for logging and region printouts.
  static const char* region_state_to_string(RegionState s) {
    switch (s) {
      case _empty_uncommitted:       return "Empty Uncommitted";
      case _empty_committed:         return "Empty Committed";
      case _regular:                 return "Regular";
      case _humongous_start:         return "Humongous Start";
      case _humongous_cont:          return "Humongous Continuation";
      case _pinned_humongous_start:  return "Humongous Start, Pinned";
      case _cset:                    return "Collection Set";
      case _pinned:                  return "Pinned";
      case _pinned_cset:             return "Collection Set, Pinned";
      case _trash:                   return "Trash";
      default:
        ShouldNotReachHere();
        return "";
    }
  }

private:
  // This method protects from accidental changes in enum order:
  // ordinals assigned here are fixed, even if RegionState declaration order
  // changes (note the later-added states keep the higher ordinals 8 and 9).
  int region_state_to_ordinal(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:      return 0;
      case _empty_committed:        return 1;
      case _regular:                return 2;
      case _humongous_start:        return 3;
      case _humongous_cont:         return 4;
      case _cset:                   return 5;
      case _pinned:                 return 6;
      case _trash:                  return 7;
      case _pinned_cset:            return 8;
      case _pinned_humongous_start: return 9;
      default:
        ShouldNotReachHere();
        return -1;
    }
  }

  // Fails with a diagnostic naming the attempted transition ('method') when a
  // make_* call below is not legal from the current state.
  void report_illegal_transition(const char* method);

public:
  // Number of distinct region states, e.g. for sizing per-state counters.
  static int region_states_num() {
    return _REGION_STATES_NUM;
  }

  // Allowed transitions from the outside code:
  // (each asserts the heap lock and validates against the state machine above)
  void make_regular_allocation(ShenandoahAffiliation affiliation);
  void make_affiliated_maybe();
  void make_regular_bypass();
  void make_humongous_start();
  void make_humongous_cont();
  void make_humongous_start_bypass(ShenandoahAffiliation affiliation);
  void make_humongous_cont_bypass(ShenandoahAffiliation affiliation);
  void make_pinned();
  void make_unpinned();
  void make_cset();
  void make_trash();
  void make_trash_immediate();
  void make_empty();
  void make_uncommitted();
  void make_committed_bypass();

  // Individual states:
  bool is_empty_uncommitted()      const { return _state == _empty_uncommitted; }
  bool is_empty_committed()        const { return _state == _empty_committed; }
  bool is_regular()                const { return _state == _regular; }
  bool is_humongous_continuation() const { return _state == _humongous_cont; }

  // Participation in logical groups:
  bool is_empty()                  const { return is_empty_committed() || is_empty_uncommitted(); }
  bool is_active()                 const { return !is_empty() && !is_trash(); }
  bool is_trash()                  const { return _state == _trash; }
  bool is_humongous_start()        const { return _state == _humongous_start || _state == _pinned_humongous_start; }
  bool is_humongous()              const { return is_humongous_start() || is_humongous_continuation(); }
  // Every state except _empty_uncommitted has backing memory committed.
  bool is_committed()              const { return !is_empty_uncommitted(); }
  bool is_cset()                   const { return _state == _cset   || _state == _pinned_cset; }
  bool is_pinned()                 const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }
  bool is_regular_pinned()         const { return _state == _pinned; }

  inline bool is_young() const;
  inline bool is_old() const;
  inline bool is_affiliated() const;

  // Macro-properties:
  bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
  bool is_stw_move_allowed()       const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }

  RegionState state()              const { return _state; }
  int  state_ordinal()             const { return region_state_to_ordinal(_state); }

  // Critical-pin bookkeeping (JNI critical sections); count kept in _critical_pins.
  void record_pin();
  void record_unpin();
  size_t pin_count() const;

private:
  // Global region geometry, computed once by setup_sizes() before the heap is
  // initialized; constant thereafter.
  static size_t RegionCount;
  static size_t RegionSizeBytes;
  static size_t RegionSizeWords;
  static size_t RegionSizeBytesShift;
  static size_t RegionSizeWordsShift;
  static size_t RegionSizeBytesMask;
  static size_t RegionSizeWordsMask;
  static size_t HumongousThresholdBytes;   // allocations >= this go the humongous path
  static size_t HumongousThresholdWords;
  static size_t MaxTLABSizeBytes;
  static size_t MaxTLABSizeWords;

  // Never updated fields
  size_t const _index;                 // this region's index within the heap's region table
  HeapWord* const _bottom;             // first word of the region
  HeapWord* const _end;                // one past the last word of the region

  // Rarely updated fields
  HeapWord* _new_top;                  // prospective top, installed at pause time
  double _empty_time;                  // timestamp of when the region last became empty

  HeapWord* _top_before_promoted;      // saved top for in-place promotion (see save/restore below)

  // Seldom updated fields
  RegionState _state;
  HeapWord* _coalesce_and_fill_boundary; // for old regions not selected as collection set candidates.

  // Frequently updated fields
  HeapWord* _top;                      // current allocation pointer

  // Per-type allocation accounting, in words; reset by reset_alloc_metadata().
  size_t _tlab_allocs;
  size_t _gclab_allocs;
  size_t _plab_allocs;

  volatile size_t _live_data;          // live words, updated atomically by marking/allocation
  volatile size_t _critical_pins;      // JNI critical-section pin count

  HeapWord* volatile _update_watermark;

  uint _age;                           // region age in GC cycles, drives tenuring decisions
  CENSUS_NOISE(uint _youth;)   // tracks epochs of retrograde ageing (rejuvenation)

public:
  ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed);

  static const size_t MIN_NUM_REGIONS = 10;

  // Return adjusted max heap size
  static size_t setup_sizes(size_t max_heap_size);

  double empty_time() {
    return _empty_time;
  }

  // Number of regions needed to hold 'bytes' bytes (ceiling division by the
  // region size, done via shift since the region size is a power of two).
  inline static size_t required_regions(size_t bytes) {
    return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
  }

  inline static size_t region_count() {
    return ShenandoahHeapRegion::RegionCount;
  }

  inline static size_t region_size_bytes() {
    return ShenandoahHeapRegion::RegionSizeBytes;
  }

  inline static size_t region_size_words() {
    return ShenandoahHeapRegion::RegionSizeWords;
  }

  inline static size_t region_size_bytes_shift() {
    return ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  inline static size_t region_size_words_shift() {
    return ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t region_size_bytes_mask() {
    return ShenandoahHeapRegion::RegionSizeBytesMask;
  }

  inline static size_t region_size_words_mask() {
    return ShenandoahHeapRegion::RegionSizeWordsMask;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytes;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWords;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t humongous_threshold_bytes() {
    return ShenandoahHeapRegion::HumongousThresholdBytes;
  }

  inline static size_t humongous_threshold_words() {
    return ShenandoahHeapRegion::HumongousThresholdWords;
  }

  inline static size_t max_tlab_size_bytes() {
    return ShenandoahHeapRegion::MaxTLABSizeBytes;
  }

  inline static size_t max_tlab_size_words() {
    return ShenandoahHeapRegion::MaxTLABSizeWords;
  }

  inline size_t index() const {
    return _index;
  }

  // Save/restore of top around in-place promotion of a young region to old
  // (implementations in the .inline.hpp).
  inline void save_top_before_promote();
  inline HeapWord* get_top_before_promote() const { return _top_before_promoted; }
  inline void restore_top_before_promote();
  inline size_t garbage_before_padded_for_promote() const;

  // If next available memory is not aligned on address that is multiple of alignment, fill the empty space
  // so that returned object is aligned on an address that is a multiple of alignment_in_bytes.  Requested
  // size is in words.  It is assumed that this->is_old().  A pad object is allocated, filled, and registered
  // if necessary to assure the new allocation is properly aligned.  Return nullptr if memory is not available.
  inline HeapWord* allocate_aligned(size_t word_size, ShenandoahAllocRequest &req, size_t alignment_in_bytes);

  // Allocation (return nullptr if full)
  inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest req);

  inline void clear_live_data();
  void set_live_data(size_t s);

  // Increase live data for newly allocated region
  inline void increase_live_data_alloc_words(size_t s);

  // Increase live data for region scanned with GC
  inline void increase_live_data_gc_words(size_t s);

  inline bool has_live() const;
  inline size_t get_live_data_bytes() const;
  inline size_t get_live_data_words() const;

  inline size_t garbage() const;

  void print_on(outputStream* st) const;

  // Trash -> Empty transition: clean up region contents and metadata.
  void recycle();

  // Preemptible coalesce-and-fill protocol for old regions: the boundary marks
  // how far filling has progressed, so a cancelled pass can resume later.
  inline void begin_preemptible_coalesce_and_fill() {
    _coalesce_and_fill_boundary = _bottom;
  }

  inline void end_preemptible_coalesce_and_fill() {
    _coalesce_and_fill_boundary = _end;
  }

  inline void suspend_coalesce_and_fill(HeapWord* next_focus) {
    _coalesce_and_fill_boundary = next_focus;
  }

  inline HeapWord* resume_coalesce_and_fill() {
    return _coalesce_and_fill_boundary;
  }

  // Coalesce contiguous spans of garbage objects by filling header and registering start locations with remembered set.
  // This is used by old-gen GC following concurrent marking to make old-gen HeapRegions parsable. Old regions must be
  // parsable because the mark bitmap is not reliable during the concurrent old mark.
  // Return true iff region is completely coalesced and filled.  Returns false if cancelled before task is complete.
  bool oop_coalesce_and_fill(bool cancellable);

  // Invoke closure on every reference contained within the humongous object that spans this humongous
  // region if the reference is contained within a DIRTY card and the reference is no more than words following
  // start within the humongous object.
  void oop_iterate_humongous_slice_dirty(OopIterateClosure* cl, HeapWord* start, size_t words, bool write_table) const;

  // Invoke closure on every reference contained within the humongous object starting from start and
  // ending at start + words.
  void oop_iterate_humongous_slice_all(OopIterateClosure* cl, HeapWord* start, size_t words) const;

  HeapWord* block_start(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  bool block_is_obj(const HeapWord* p) const { return p < top(); }  // everything below top is parsable object data

  // Find humongous start region that this region belongs to
  ShenandoahHeapRegion* humongous_start_region() const;

  HeapWord* top() const         { return _top;     }
  void set_top(HeapWord* v)     { _top = v;        }

  HeapWord* new_top() const     { return _new_top; }
  void set_new_top(HeapWord* v) { _new_top = v;    }

  HeapWord* bottom() const      { return _bottom;  }
  HeapWord* end() const         { return _end;     }

  // Sizes below are in bytes.
  size_t capacity() const       { return byte_size(bottom(), end()); }
  size_t used() const           { return byte_size(bottom(), top()); }
  size_t used_before_promote() const { return byte_size(bottom(), get_top_before_promote()); }
  size_t free() const           { return byte_size(top(),    end()); }

  // Does this region contain this address?
  // Note: checks against top(), not end() — addresses in the unallocated tail
  // are not considered contained.
  bool contains(HeapWord* p) const {
    return (bottom() <= p) && (p < top());
  }

  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
  void reset_alloc_metadata();
  size_t get_shared_allocs() const;
  size_t get_tlab_allocs() const;
  size_t get_gclab_allocs() const;
  size_t get_plab_allocs() const;

  inline HeapWord* get_update_watermark() const;
  inline void set_update_watermark(HeapWord* w);
  inline void set_update_watermark_at_safepoint(HeapWord* w);

  inline ShenandoahAffiliation affiliation() const;
  inline const char* affiliation_name() const;

  void set_affiliation(ShenandoahAffiliation new_affiliation);

  // Region ageing and rejuvenation
  uint age() const { return _age; }
  CENSUS_NOISE(uint youth() const { return _youth; })

  // Advance region age by one, saturating at markWord::max_age (region ages
  // must stay within the range representable in object mark words).
  void increment_age() {
    const uint max_age = markWord::max_age;
    assert(_age <= max_age, "Error");
    if (_age++ >= max_age) {
      _age = max_age;   // clamp
    }
  }

  void reset_age() {
    CENSUS_NOISE(_youth += _age;)   // accumulate the discarded age as "youth" noise for the census
    _age = 0;
  }

  CENSUS_NOISE(void clear_youth() { _youth = 0; })

private:
  void decrement_humongous_waste() const;
  // Commit/uncommit the region's backing memory (and associated metadata).
  void do_commit();
  void do_uncommit();

  inline void internal_increase_live_data(size_t s);

  // Single choke point for all state changes; asserts heap lock.
  void set_state(RegionState to);
};
488 
489 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP