/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "utilities/sizes.hpp"

class VMStructs;
class ShenandoahHeapRegionStateConstant;

// One fixed-size region of the Shenandoah heap. A region knows its memory
// bounds ([bottom(), end()), with the current allocation point at top()),
// its life-cycle state (see the state machine below), liveness accounting,
// pin counts, and per-type allocation statistics. All regions share the same
// size, established once by setup_sizes() and exposed through the static
// RegionSize* accessors. Fields and the state enum are exposed to VMStructs
// (serviceability agent) via the friend declarations below, so their names,
// types, and ordering are part of an external contract.
class ShenandoahHeapRegion {
  friend class VMStructs;
  friend class ShenandoahHeapRegionStateConstant;
private:
  /*
    Region state is described by a state machine. Transitions are guarded by
    heap lock, which allows changing the state of several regions atomically.
    Region states can be logically aggregated in groups.

      "Empty":
      .................................................................
      .                                                               .
      .                                                               .
      .         Uncommitted  <-------  Committed <------------------------\
      .              |                     |                          .   |
      .              \---------v-----------/                          .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Active":                |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .      /-----------------^-------------------\                  .   |
      .      |                                     |                  .   |
      .      v                                     v    "Humongous":  .   |
      .   Regular ---\-----\     ..................O................  .   |
      .     |  ^     |     |     .                 |               .  .   |
      .     |  |     |     |     .                 *---------\     .  .   |
      .     v  |     |     |     .                 v         v     .  .   |
      .    Pinned  Cset    |     .  HStart <--> H/Start   H/Cont   .  .   |
      .       ^    / |     |     .  Pinned         v         |     .  .   |
      .       |   /  |     |     .                 *<--------/     .  .   |
      .       |  v   |     |     .                 |               .  .   |
      .  CsetPinned  |     |     ..................O................  .   |
      .              |     |                       |                  .   |
      .              \-----\---v-------------------/                  .   |
      .                        |                                      .   |
      .........................|.......................................   |
                               |                                          |
      "Trash":                 |                                          |
      .........................|.......................................   |
      .                        |                                      .   |
      .                        v                                      .   |
      .              Trash ---------------------------------------------/
      .                                                               .
      .                                                               .
      .................................................................

    Transition from "Empty" to "Active" is first allocation. It can go from {Uncommitted, Committed}
    to {Regular, "Humongous"}. The allocation may happen in Regular regions too, but not in Humongous.

    Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
    and from {Regular, "Humongous"} for immediate reclamation. The existence of Trash state allows
    quick reclamation without actual cleaning up.

    Transition from "Trash" to "Empty" is recycling. It cleans up the regions and corresponding metadata.
    Can be done asynchronously and in bulk.

    Note how internal transitions disallow logic bugs:
    a) No region can go Empty, unless properly reclaimed/recycled;
    b) No region can go Uncommitted, unless reclaimed/recycled first;
    c) Only Regular regions can go to CSet;
    d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned;
    e) Pinned cannot go CSet, thus it never moves;
    f) Humongous cannot be used for regular allocations;
    g) Humongous cannot go CSet, thus it never moves;
    h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
       follow associated humongous starts, not pinnable/movable by themselves);
    i) Empty cannot go Trash, avoiding useless work;
    j) ...
  */

  enum RegionState {
    _empty_uncommitted,       // region is empty and has memory uncommitted
    _empty_committed,         // region is empty and has memory committed
    _regular,                 // region is for regular allocations
    _humongous_start,         // region is the humongous start
    _humongous_cont,          // region is the humongous continuation
    _pinned_humongous_start,  // region is both humongous start and pinned
    _cset,                    // region is in collection set
    _pinned,                  // region is pinned
    _pinned_cset,             // region is pinned and in cset (evac failure path)
    _trash,                   // region contains only trash
    _REGION_STATES_NUM        // last
  };

  // Human-readable name for a region state, for logging/printing.
  static const char* region_state_to_string(RegionState s) {
    switch (s) {
      case _empty_uncommitted:       return "Empty Uncommitted";
      case _empty_committed:         return "Empty Committed";
      case _regular:                 return "Regular";
      case _humongous_start:         return "Humongous Start";
      case _humongous_cont:          return "Humongous Continuation";
      case _pinned_humongous_start:  return "Humongous Start, Pinned";
      case _cset:                    return "Collection Set";
      case _pinned:                  return "Pinned";
      case _pinned_cset:             return "Collection Set, Pinned";
      case _trash:                   return "Trash";
      default:
        ShouldNotReachHere();
        return "";
    }
  }

  // This method protects from accidental changes in enum order:
  // NOTE(review): the ordinals below deliberately do NOT follow the enum
  // declaration order (e.g. _pinned_humongous_start is declared 6th but maps
  // to 9). Presumably they are kept stable for external consumers of the
  // ordinal (see ShenandoahHeapRegionStateConstant/VMStructs friends) —
  // confirm before "fixing" the ordering.
  int region_state_to_ordinal(RegionState s) const {
    switch (s) {
      case _empty_uncommitted:      return 0;
      case _empty_committed:        return 1;
      case _regular:                return 2;
      case _humongous_start:        return 3;
      case _humongous_cont:         return 4;
      case _cset:                   return 5;
      case _pinned:                 return 6;
      case _trash:                  return 7;
      case _pinned_cset:            return 8;
      case _pinned_humongous_start: return 9;
      default:
        ShouldNotReachHere();
        return -1;
    }
  }

  // Reports a disallowed state-machine transition attempted by 'method'.
  void report_illegal_transition(const char* method);

public:
  // Number of distinct region states (for sizing external tables).
  static const int region_states_num() {
    return _REGION_STATES_NUM;
  }

  // Allowed transitions from the outside code:
  void make_regular_allocation(ShenandoahRegionAffiliation affiliation);
  void make_young_maybe();
  void make_regular_bypass();
  void make_humongous_start();
  void make_humongous_cont();
  void make_humongous_start_bypass(ShenandoahRegionAffiliation affiliation);
  void make_humongous_cont_bypass(ShenandoahRegionAffiliation affiliation);
  void make_pinned();
  void make_unpinned();
  void make_cset();
  void make_trash();
  void make_trash_immediate();
  void make_empty();
  void make_uncommitted();
  void make_committed_bypass();

  // Individual states:
  bool is_empty_uncommitted()      const { return _state == _empty_uncommitted; }
  bool is_empty_committed()        const { return _state == _empty_committed; }
  bool is_regular()                const { return _state == _regular; }
  bool is_humongous_continuation() const { return _state == _humongous_cont; }

  // Participation in logical groups:
  bool is_empty()                  const { return is_empty_committed() || is_empty_uncommitted(); }
  bool is_active()                 const { return !is_empty() && !is_trash(); }
  bool is_trash()                  const { return _state == _trash; }
  bool is_humongous_start()        const { return _state == _humongous_start || _state == _pinned_humongous_start; }
  bool is_humongous()              const { return is_humongous_start() || is_humongous_continuation(); }
  bool is_committed()              const { return !is_empty_uncommitted(); }
  bool is_cset()                   const { return _state == _cset   || _state == _pinned_cset; }
  bool is_pinned()                 const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }
  inline bool is_young() const;
  inline bool is_old() const;

  // Macro-properties:
  bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
  bool is_stw_move_allowed()       const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }

  RegionState state()              const { return _state; }
  // Stable ordinal of the current state (see region_state_to_ordinal()).
  int  state_ordinal()             const { return region_state_to_ordinal(_state); }

  // Critical-pin bookkeeping (backed by _critical_pins).
  void record_pin();
  void record_unpin();
  size_t pin_count() const;

  // Young LAB flag management (backed by _has_young_lab).
  void clear_young_lab_flags();
  void set_young_lab_flag();
  bool has_young_lab_flag();

private:
  // Heap-wide region geometry, computed once by setup_sizes().
  static size_t RegionCount;
  static size_t RegionSizeBytes;
  static size_t RegionSizeWords;
  static size_t RegionSizeBytesShift;
  static size_t RegionSizeWordsShift;
  static size_t RegionSizeBytesMask;
  static size_t RegionSizeWordsMask;
  static size_t HumongousThresholdBytes;
  static size_t HumongousThresholdWords;
  static size_t MaxTLABSizeBytes;
  static size_t MaxTLABSizeWords;

  // Never updated fields
  size_t const _index;              // fixed index of this region
  HeapWord* const _bottom;          // first word of the region
  HeapWord* const _end;             // one past the last word of the region

  // Rarely updated fields
  HeapWord* _new_top;
  double _empty_time;               // timestamp recorded when the region became empty (see empty_time()) — TODO confirm units/source

  // Seldom updated fields
  RegionState _state;
  HeapWord* _coalesce_and_fill_boundary; // for old regions not selected as collection set candidates.

  // Frequently updated fields
  HeapWord* _top;                   // current allocation point within [bottom, end)

  size_t _tlab_allocs;
  size_t _gclab_allocs;
  size_t _plab_allocs;

  bool _has_young_lab;

  volatile size_t _live_data;
  volatile size_t _critical_pins;

  HeapWord* volatile _update_watermark;

  uint _age;                        // region age; see increment_age()/decrement_age()/reset_age()

public:
  ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed);

  static const size_t MIN_NUM_REGIONS = 10;

  // Return adjusted max heap size
  static size_t setup_sizes(size_t max_heap_size);

  double empty_time() {
    return _empty_time;
  }

  // Number of regions needed to hold 'bytes' (ceiling division by region size).
  inline static size_t required_regions(size_t bytes) {
    return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
  }

  inline static size_t region_count() {
    return ShenandoahHeapRegion::RegionCount;
  }

  inline static size_t region_size_bytes() {
    return ShenandoahHeapRegion::RegionSizeBytes;
  }

  inline static size_t region_size_words() {
    return ShenandoahHeapRegion::RegionSizeWords;
  }

  inline static size_t region_size_bytes_shift() {
    return ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  inline static size_t region_size_words_shift() {
    return ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t region_size_bytes_mask() {
    return ShenandoahHeapRegion::RegionSizeBytesMask;
  }

  inline static size_t region_size_words_mask() {
    return ShenandoahHeapRegion::RegionSizeWordsMask;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytes;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWords;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_bytes_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeBytesShift;
  }

  // Convert to jint with sanity checking
  inline static jint region_size_words_shift_jint() {
    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
    return (jint)ShenandoahHeapRegion::RegionSizeWordsShift;
  }

  inline static size_t humongous_threshold_bytes() {
    return ShenandoahHeapRegion::HumongousThresholdBytes;
  }

  inline static size_t humongous_threshold_words() {
    return ShenandoahHeapRegion::HumongousThresholdWords;
  }

  inline static size_t max_tlab_size_bytes() {
    return ShenandoahHeapRegion::MaxTLABSizeBytes;
  }

  inline static size_t max_tlab_size_words() {
    return ShenandoahHeapRegion::MaxTLABSizeWords;
  }

  inline size_t index() const {
    return _index;
  }

  // Allocation (return nullptr if full)
  inline HeapWord* allocate_aligned(size_t word_size, ShenandoahAllocRequest &req, size_t alignment_in_words);

  // Allocation (return nullptr if full)
  // NOTE(review): 'req' is taken by value here but by reference in
  // allocate_aligned() above — confirm the copy is intended.
  inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest req);

  inline void clear_live_data();
  void set_live_data(size_t s);

  // Increase live data for newly allocated region
  inline void increase_live_data_alloc_words(size_t s);

  // Increase live data for region scanned with GC
  inline void increase_live_data_gc_words(size_t s);

  inline bool has_live() const;
  inline size_t get_live_data_bytes() const;
  inline size_t get_live_data_words() const;

  inline size_t garbage() const;

  void print_on(outputStream* st) const;

  void recycle();

  // Preemptible coalesce-and-fill cursor management: the boundary records how
  // far the (resumable) coalesce-and-fill pass has progressed in this region.
  inline void begin_preemptible_coalesce_and_fill() {
    _coalesce_and_fill_boundary = _bottom;
  }

  inline void end_preemptible_coalesce_and_fill() {
    _coalesce_and_fill_boundary = _end;
  }

  inline void suspend_coalesce_and_fill(HeapWord* next_focus) {
    _coalesce_and_fill_boundary = next_focus;
  }

  inline HeapWord* resume_coalesce_and_fill() {
    return _coalesce_and_fill_boundary;
  }

  // Coalesce contiguous spans of garbage objects by filling header and reregistering start locations with remembered set.
  // This is used by old-gen GC following concurrent marking to make old-gen HeapRegions parseable. Return true iff
  // region is completely coalesced and filled. Returns false if cancelled before task is complete.
  bool oop_fill_and_coalesce();

  // Like oop_fill_and_coalesce(), but without honoring cancellation requests.
  bool oop_fill_and_coalesce_wo_cancel();

  // During global collections, this service iterates through an old-gen heap region that is not part of collection
  // set to fill and register ranges of dead memory. Note that live objects were previously registered. Some dead objects
  // that are subsumed into coalesced ranges of dead memory need to be "unregistered".
  void global_oop_iterate_and_fill_dead(OopIterateClosure* cl);
  void oop_iterate_humongous(OopIterateClosure* cl);
  void oop_iterate_humongous(OopIterateClosure* cl, HeapWord* start, size_t words);

  // Invoke closure on every reference contained within the humongous object that spans this humongous
  // region if the reference is contained within a DIRTY card and the reference is no more than words following
  // start within the humongous object.
  void oop_iterate_humongous_slice(OopIterateClosure* cl, bool dirty_only, HeapWord* start, size_t words, bool write_table);

  HeapWord* block_start(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Find humongous start region that this region belongs to
  ShenandoahHeapRegion* humongous_start_region() const;

  HeapWord* top() const         { return _top;     }
  void set_top(HeapWord* v)     { _top = v;        }

  HeapWord* new_top() const     { return _new_top; }
  void set_new_top(HeapWord* v) { _new_top = v;    }

  HeapWord* bottom() const      { return _bottom;  }
  HeapWord* end() const         { return _end;     }

  size_t capacity() const       { return byte_size(bottom(), end()); }
  size_t used() const           { return byte_size(bottom(), top()); }
  size_t free() const           { return byte_size(top(), end());    }

  // Does this region contain this address?
  bool contains(HeapWord* p) const {
    return (bottom() <= p) && (p < top());
  }

  // Per-allocation-type statistics (shared/TLAB/GCLAB/PLAB).
  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
  void reset_alloc_metadata();
  size_t get_shared_allocs() const;
  size_t get_tlab_allocs() const;
  size_t get_gclab_allocs() const;
  size_t get_plab_allocs() const;

  inline HeapWord* get_update_watermark() const;
  inline void set_update_watermark(HeapWord* w);
  inline void set_update_watermark_at_safepoint(HeapWord* w);

  inline ShenandoahRegionAffiliation affiliation() const;

  void set_affiliation(ShenandoahRegionAffiliation new_affiliation);

  uint age() { return _age; }
  void increment_age() { _age++; }
  // Saturating decrement: if _age is already 0, the post-decrement wraps the
  // unsigned value, which is then immediately reset back to 0.
  void decrement_age() { if (_age-- == 0) { _age = 0; } }
  void reset_age() { _age = 0; }

  // Sets all remembered set cards to dirty. Returns the number of regions spanned by the associated humongous object.
  size_t promote_humongous();

private:
  void do_commit();
  void do_uncommit();

  // This is an old-region that was not part of the collection set during a GLOBAL collection. We coalesce the dead
  // objects, but do not need to register the live objects as they are already registered.
  void global_oop_iterate_objects_and_fill_dead(OopIterateClosure* cl);

  inline void internal_increase_live_data(size_t s);

  void set_state(RegionState to);
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP