/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shenandoah/shenandoahAffiliation.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "utilities/sizes.hpp"

class VMStructs;
class ShenandoahHeapRegionStateConstant;

// A ShenandoahHeapRegion is one fixed-size slice of the Shenandoah heap,
// identified by its _index and spanning the address range [_bottom, _end).
// All regions share a single global size, computed once by setup_sizes().
class ShenandoahHeapRegion {
  friend class VMStructs;
  friend class ShenandoahHeapRegionStateConstant;
private:
  /*
    Region state is described by a state machine. Transitions are guarded by
    heap lock, which allows changing the state of several regions atomically.
    Region states can be logically aggregated in groups.

      "Empty":
      .................................................................
      .                                                               .
      .                                                               .
      .         Uncommitted  <-------  Committed <------------------------\
      .              |                     |                          .   |
      .              \---------v-----------/                          .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Active":                |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .      /-----------------^-------------------\                  .   |
      .      |                                     |                  .   |
      .      v                                     v    "Humongous":  .   |
      .   Regular ---\-----\     ..................O................  .   |
      .     |  ^     |     |     .                 |               .  .   |
      .     |  |     |     |     .                 *---------\     .  .   |
      .     v  |     |     |     .                 v         v     .  .   |
      .    Pinned  Cset    |     .  HStart <--> H/Start   H/Cont   .  .   |
      .       ^    / |     |     .  Pinned         v         |     .  .   |
      .       |   /  |     |     .                 *<--------/     .  .   |
      .       |  v   |     |     .                 |               .  .   |
      .  CsetPinned  |     |     ..................O................  .   |
      .              |     |                       |                  .   |
      .              \-----\---v-------------------/                  .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Trash":                 |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .                        v                                      .   |
      .              Trash ---------------------------------------------/
      .                                                               .
      .                                                               .
      .................................................................

    Transition from "Empty" to "Active" is first allocation. It can go from {Uncommitted, Committed}
    to {Regular, "Humongous"}. The allocation may happen in Regular regions too, but not in Humongous.

    Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
    and from {Regular, "Humongous"} for immediate reclamation. The existence of Trash state allows
    quick reclamation without actual cleaning up.

    Transition from "Trash" to "Empty" is recycling. It cleans up the regions and corresponding metadata.
    Can be done asynchronously and in bulk.

    Note how internal transitions disallow logic bugs:
      a) No region can go Empty, unless properly reclaimed/recycled;
      b) No region can go Uncommitted, unless reclaimed/recycled first;
      c) Only Regular regions can go to CSet;
      d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned;
      e) Pinned cannot go CSet, thus it never moves;
      f) Humongous cannot be used for regular allocations;
      g) Humongous cannot go CSet, thus it never moves;
      h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
         follow associated humongous starts, not pinnable/movable by themselves);
      i) Empty cannot go Trash, avoiding useless work;
      j) ...
  */

  enum RegionState {
    _empty_uncommitted,       // region is empty and has memory uncommitted
    _empty_committed,         // region is empty and has memory committed
    _regular,                 // region is for regular allocations
    _humongous_start,         // region is the humongous start
    _humongous_cont,          // region is the humongous continuation
    _pinned_humongous_start,  // region is both humongous start and pinned
    _cset,                    // region is in collection set
    _pinned,                  // region is pinned
    _pinned_cset,             // region is pinned and in cset (evac failure path)
    _trash,                   // region contains only trash
    _REGION_STATES_NUM        // last
  };

public:
  // Human-readable name for a region state, e.g. for logging and printing.
  static const char* region_state_to_string(RegionState s) {
    switch (s) {
      case _empty_uncommitted:       return "Empty Uncommitted";
      case _empty_committed:         return "Empty Committed";
      case _regular:                 return "Regular";
      case _humongous_start:         return "Humongous Start";
      case _humongous_cont:          return "Humongous Continuation";
      case _pinned_humongous_start:  return "Humongous Start, Pinned";
      case _cset:                    return "Collection Set";
      case _pinned:                  return "Pinned";
      case _pinned_cset:             return "Collection Set, Pinned";
      case _trash:                   return "Trash";
      default:
        ShouldNotReachHere();
        return "";
    }
  }

private:
  // This method protects from accidental changes in enum order:
  // the ordinal mapping is spelled out explicitly, so reordering the enum
  // without updating consumers trips this switch rather than silently
  // renumbering externally visible ordinals.
  int region_state_to_ordinal(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:      return 0;
      case _empty_committed:        return 1;
      case _regular:                return 2;
      case _humongous_start:        return 3;
      case _humongous_cont:         return 4;
      case _cset:                   return 5;
      case _pinned:                 return 6;
      case _trash:                  return 7;
      case _pinned_cset:            return 8;
      case _pinned_humongous_start: return 9;
      default:
        ShouldNotReachHere();
        return -1;
    }
  }

  // Reports (fatally, per the state machine contract above) an attempt to
  // perform a state transition that the diagram does not allow. `method`
  // names the make_* entry point that was called.
  void report_illegal_transition(const char* method);

public:
  static int region_states_num() {
    return _REGION_STATES_NUM;
  }

  // Allowed transitions from the outside code:
  void make_regular_allocation(ShenandoahAffiliation affiliation);
  void make_young_maybe();
  void make_regular_bypass();
  void make_humongous_start();
  void make_humongous_cont();
  void make_humongous_start_bypass(ShenandoahAffiliation affiliation);
  void make_humongous_cont_bypass(ShenandoahAffiliation affiliation);
  void make_pinned();
  void make_unpinned();
  void make_cset();
  void make_trash();
  void make_trash_immediate();
  void make_empty();
  void make_uncommitted();
  void make_committed_bypass();

  // Individual states:
  bool is_empty_uncommitted()      const { return _state == _empty_uncommitted; }
  bool is_empty_committed()        const { return _state == _empty_committed; }
  bool is_regular()                const { return _state == _regular; }
  bool is_humongous_continuation() const { return _state == _humongous_cont; }

  // Participation in logical groups:
  bool is_empty()                  const { return is_empty_committed() || is_empty_uncommitted(); }
  bool is_active()                 const { return !is_empty() && !is_trash(); }
  bool is_trash()                  const { return _state == _trash; }
  bool is_humongous_start()        const { return _state == _humongous_start || _state == _pinned_humongous_start; }
  bool is_humongous()              const { return is_humongous_start() || is_humongous_continuation(); }
  bool is_committed()              const { return !is_empty_uncommitted(); }
  bool is_cset()                   const { return _state == _cset   || _state == _pinned_cset; }
  bool is_pinned()                 const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }
  bool is_regular_pinned()         const { return _state == _pinned; }

  // Generational affiliation predicates; defined in the .inline.hpp.
  inline bool is_young() const;
  inline bool is_old() const;
  inline bool is_affiliated() const;

  // Macro-properties:
  bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
  bool is_stw_move_allowed()       const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }

  RegionState state()              const { return _state; }
  int  state_ordinal()             const { return region_state_to_ordinal(_state); }

  // Pin bookkeeping; pin_count() reflects the _critical_pins counter.
  void record_pin();
  void record_unpin();
  size_t pin_count() const;

private:
  // Global sizing parameters, computed once in setup_sizes() and shared by
  // all regions. Shift/mask variants exist because the region size is a
  // power of two (see required_regions() below).
  static size_t RegionCount;
  static size_t RegionSizeBytes;
  static size_t RegionSizeWords;
  static size_t RegionSizeBytesShift;
  static size_t RegionSizeWordsShift;
  static size_t RegionSizeBytesMask;
  static size_t RegionSizeWordsMask;
  static size_t HumongousThresholdBytes;
  static size_t HumongousThresholdWords;
  static size_t MaxTLABSizeBytes;
  static size_t MaxTLABSizeWords;

  // Never updated fields
  size_t const _index;
  HeapWord* const _bottom;
  HeapWord* const _end;

  // Rarely updated fields
  HeapWord* _new_top;
  double _empty_time;

  HeapWord* _top_before_promoted;

  // Seldom updated fields
  RegionState _state;
  HeapWord* _coalesce_and_fill_boundary;   // for old regions not selected as collection set candidates.

  // Frequently updated fields
  HeapWord* _top;

  size_t _tlab_allocs;
  size_t _gclab_allocs;
  size_t _plab_allocs;

  volatile size_t _live_data;
  volatile size_t _critical_pins;

  HeapWord* volatile _update_watermark;

  uint _age;
  CENSUS_NOISE(uint _youth;)   // tracks epochs of retrograde ageing (rejuvenation)

public:
  ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed);

  static const size_t MIN_NUM_REGIONS = 10;

  // Return adjusted max heap size
  static size_t setup_sizes(size_t max_heap_size);

  double empty_time() {
    return _empty_time;
  }

  // Number of regions needed to hold `bytes`: ceiling division by the
  // (power-of-two) region size, done with an add and a shift.
  inline static size_t required_regions(size_t bytes) {
    return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
  }

  inline static size_t region_count() {
    return ShenandoahHeapRegion::RegionCount;
  }

  inline static size_t region_size_bytes() {
    return ShenandoahHeapRegion::RegionSizeBytes;
  }

  inline static size_t region_size_words() {
    return ShenandoahHeapRegion::RegionSizeWords;
  }

  inline static size_t region_size_bytes_shift() {
    return ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  inline static size_t region_size_words_shift() {
    return ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t region_size_bytes_mask() {
    return ShenandoahHeapRegion::RegionSizeBytesMask;
  }

  inline static size_t region_size_words_mask() {
    return ShenandoahHeapRegion::RegionSizeWordsMask;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytes;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWords;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t humongous_threshold_bytes() {
    return ShenandoahHeapRegion::HumongousThresholdBytes;
  }

  inline static size_t humongous_threshold_words() {
    return ShenandoahHeapRegion::HumongousThresholdWords;
  }

  inline static size_t max_tlab_size_bytes() {
    return ShenandoahHeapRegion::MaxTLABSizeBytes;
  }

  inline static size_t max_tlab_size_words() {
    return ShenandoahHeapRegion::MaxTLABSizeWords;
  }

  inline size_t index() const {
    return _index;
  }

  // Save/restore of _top around promotion; see .inline.hpp for definitions.
  inline void save_top_before_promote();
  inline HeapWord* get_top_before_promote() const { return _top_before_promoted; }
  inline void restore_top_before_promote();
  inline size_t garbage_before_padded_for_promote() const;

  // If next available memory is not aligned on address that is multiple of alignment, fill the empty space
  // so that returned object is aligned on an address that is a multiple of alignment_in_bytes. Requested
  // size is in words. It is assumed that this->is_old(). A pad object is allocated, filled, and registered
  // if necessary to assure the new allocation is properly aligned. Return nullptr if memory is not available.
  inline HeapWord* allocate_aligned(size_t word_size, ShenandoahAllocRequest &req, size_t alignment_in_bytes);

  // Allocation (return nullptr if full)
  inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest req);

  inline void clear_live_data();
  void set_live_data(size_t s);

  // Increase live data for newly allocated region
  inline void increase_live_data_alloc_words(size_t s);

  // Increase live data for region scanned with GC
  inline void increase_live_data_gc_words(size_t s);

  inline bool has_live() const;
  inline size_t get_live_data_bytes() const;
  inline size_t get_live_data_words() const;

  inline size_t garbage() const;

  void print_on(outputStream* st) const;

  // Trash -> Empty transition work: clean up the region and its metadata.
  void recycle();

  // Preemptible coalesce-and-fill cursor: _coalesce_and_fill_boundary marks
  // where a suspended pass should resume. begin() rewinds it to _bottom,
  // end() pushes it past all of the region's data (_end).
  inline void begin_preemptible_coalesce_and_fill() {
    _coalesce_and_fill_boundary = _bottom;
  }

  inline void end_preemptible_coalesce_and_fill() {
    _coalesce_and_fill_boundary = _end;
  }

  inline void suspend_coalesce_and_fill(HeapWord* next_focus) {
    _coalesce_and_fill_boundary = next_focus;
  }

  inline HeapWord* resume_coalesce_and_fill() {
    return _coalesce_and_fill_boundary;
  }

  // Coalesce contiguous spans of garbage objects by filling header and reregistering start locations with remembered set.
  // This is used by old-gen GC following concurrent marking to make old-gen HeapRegions parsable. Return true iff
  // region is completely coalesced and filled. Returns false if cancelled before task is complete.
  bool oop_coalesce_and_fill(bool cancellable);

  // Invoke closure on every reference contained within the humongous object that spans this humongous
  // region if the reference is contained within a DIRTY card and the reference is no more than words following
  // start within the humongous object.
  void oop_iterate_humongous_slice(OopIterateClosure* cl, bool dirty_only, HeapWord* start, size_t words, bool write_table);

  HeapWord* block_start(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // An address parses as an object only below the allocation frontier (top).
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Find humongous start region that this region belongs to
  ShenandoahHeapRegion* humongous_start_region() const;

  HeapWord* top() const         { return _top;     }
  void set_top(HeapWord* v)     { _top = v;        }

  HeapWord* new_top() const     { return _new_top; }
  void set_new_top(HeapWord* v) { _new_top = v;    }

  HeapWord* bottom() const      { return _bottom;  }
  HeapWord* end() const         { return _end;     }

  size_t capacity() const       { return byte_size(bottom(), end()); }
  size_t used() const           { return byte_size(bottom(), top()); }
  size_t used_before_promote() const { return byte_size(bottom(), get_top_before_promote()); }
  size_t free() const           { return byte_size(top(),    end()); }

  // Does this region contain this address? Only the allocated part
  // [bottom, top) counts, not the whole committed range.
  bool contains(HeapWord* p) const {
    return (bottom() <= p) && (p < top());
  }

  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
  void reset_alloc_metadata();
  size_t get_shared_allocs() const;
  size_t get_tlab_allocs() const;
  size_t get_gclab_allocs() const;
  size_t get_plab_allocs() const;

  inline HeapWord* get_update_watermark() const;
  inline void set_update_watermark(HeapWord* w);
  inline void set_update_watermark_at_safepoint(HeapWord* w);

  inline ShenandoahAffiliation affiliation() const;
  inline const char* affiliation_name() const;

  void set_affiliation(ShenandoahAffiliation new_affiliation);

  // Region ageing and rejuvenation
  uint age() { return _age; }
  CENSUS_NOISE(uint youth() { return _youth; })

  // Saturating increment: _age never exceeds markWord::max_age.
  void increment_age() {
    const uint max_age = markWord::max_age;
    assert(_age <= max_age, "Error");
    if (_age++ >= max_age) {
      _age = max_age;   // clamp
    }
  }

  // Rejuvenate the region; accumulated age is folded into _youth noise.
  void reset_age() {
    CENSUS_NOISE(_youth += _age;)
    _age = 0;
  }

  CENSUS_NOISE(void clear_youth() { _youth = 0; })

private:
  void decrement_humongous_waste() const;
  // Commit/uncommit the region's backing memory (Uncommitted <-> Committed).
  void do_commit();
  void do_uncommit();

  inline void internal_increase_live_data(size_t s);

  // All state changes funnel through here; transitions are validated against
  // the state machine described at the top of this class.
  void set_state(RegionState to);
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP