1 /* 2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_GC_SHARED_SPACE_HPP 26 #define SHARE_GC_SHARED_SPACE_HPP 27 28 #include "gc/shared/blockOffsetTable.hpp" 29 #include "gc/shared/cardTable.hpp" 30 #include "gc/shared/workerThread.hpp" 31 #include "memory/allocation.hpp" 32 #include "memory/iterator.hpp" 33 #include "memory/memRegion.hpp" 34 #include "oops/markWord.hpp" 35 #include "runtime/mutexLocker.hpp" 36 #include "utilities/align.hpp" 37 #include "utilities/macros.hpp" 38 #if INCLUDE_SERIALGC 39 #include "gc/serial/serialBlockOffsetTable.hpp" 40 #endif 41 42 // A space is an abstraction for the "storage units" backing 43 // up the generation abstraction. It includes specific 44 // implementations for keeping track of free and used space, 45 // for iterating over objects and free blocks, etc. 46 47 // Forward decls. 48 class Space; 49 class ContiguousSpace; 50 #if INCLUDE_SERIALGC 51 class BlockOffsetArray; 52 class BlockOffsetArrayContigSpace; 53 class BlockOffsetTable; 54 #endif 55 class Generation; 56 class CompactibleSpace; 57 class CardTableRS; 58 class DirtyCardToOopClosure; 59 class SlidingForwarding; 60 class FilteringClosure; 61 62 // A Space describes a heap area. Class Space is an abstract 63 // base class. 64 // 65 // Space supports allocation, size computation and GC support is provided. 66 // 67 // Invariant: bottom() and end() are on page_size boundaries and 68 // bottom() <= top() <= end() 69 // top() is inclusive and end() is exclusive. 70 71 class Space: public CHeapObj<mtGC> { 72 friend class VMStructs; 73 protected: 74 HeapWord* _bottom; 75 HeapWord* _end; 76 77 // Used in support of save_marks() 78 HeapWord* _saved_mark_word; 79 80 Space(): 81 _bottom(nullptr), _end(nullptr) { } 82 83 public: 84 // Accessors 85 HeapWord* bottom() const { return _bottom; } 86 HeapWord* end() const { return _end; } 87 virtual void set_bottom(HeapWord* value) { _bottom = value; } 88 virtual void set_end(HeapWord* value) { _end = value; } 89 90 virtual HeapWord* saved_mark_word() const { return _saved_mark_word; } 91 92 void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; } 93 94 // Returns true if this object has been allocated since a 95 // generation's "save_marks" call. 96 virtual bool obj_allocated_since_save_marks(const oop obj) const { 97 return cast_from_oop<HeapWord*>(obj) >= saved_mark_word(); 98 } 99 100 // Returns a subregion of the space containing only the allocated objects in 101 // the space. 102 virtual MemRegion used_region() const = 0; 103 104 // Returns a region that is guaranteed to contain (at least) all objects 105 // allocated at the time of the last call to "save_marks". If the space 106 // initializes its DirtyCardToOopClosure's specifying the "contig" option 107 // (that is, if the space is contiguous), then this region must contain only 108 // such objects: the memregion will be from the bottom of the region to the 109 // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of 110 // the space must distinguish between objects in the region allocated before 111 // and after the call to save marks. 112 MemRegion used_region_at_save_marks() const { 113 return MemRegion(bottom(), saved_mark_word()); 114 } 115 116 // Initialization. 117 // "initialize" should be called once on a space, before it is used for 118 // any purpose. The "mr" arguments gives the bounds of the space, and 119 // the "clear_space" argument should be true unless the memory in "mr" is 120 // known to be zeroed. 121 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); 122 123 // The "clear" method must be called on a region that may have 124 // had allocation performed in it, but is now to be considered empty. 125 virtual void clear(bool mangle_space); 126 127 // For detecting GC bugs. Should only be called at GC boundaries, since 128 // some unused space may be used as scratch space during GC's. 129 // We also call this when expanding a space to satisfy an allocation 130 // request. See bug #4668531 131 virtual void mangle_unused_area() = 0; 132 virtual void mangle_unused_area_complete() = 0; 133 134 // Testers 135 bool is_empty() const { return used() == 0; } 136 137 // Returns true iff the given the space contains the 138 // given address as part of an allocated object. For 139 // certain kinds of spaces, this might be a potentially 140 // expensive operation. To prevent performance problems 141 // on account of its inadvertent use in product jvm's, 142 // we restrict its use to assertion checks only. 143 bool is_in(const void* p) const { 144 return used_region().contains(p); 145 } 146 bool is_in(oop obj) const { 147 return is_in((void*)obj); 148 } 149 150 // Returns true iff the given reserved memory of the space contains the 151 // given address. 152 bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; } 153 154 // Returns true iff the given block is not allocated. 155 virtual bool is_free_block(const HeapWord* p) const = 0; 156 157 // Test whether p is double-aligned 158 static bool is_aligned(void* p) { 159 return ::is_aligned(p, sizeof(double)); 160 } 161 162 // Size computations. Sizes are in bytes. 163 size_t capacity() const { return byte_size(bottom(), end()); } 164 virtual size_t used() const = 0; 165 virtual size_t free() const = 0; 166 167 // Iterate over all the ref-containing fields of all objects in the 168 // space, calling "cl.do_oop" on each. Fields in objects allocated by 169 // applications of the closure are not included in the iteration. 170 virtual void oop_iterate(OopIterateClosure* cl); 171 172 // Iterate over all objects in the space, calling "cl.do_object" on 173 // each. Objects allocated by applications of the closure are not 174 // included in the iteration. 175 virtual void object_iterate(ObjectClosure* blk) = 0; 176 177 // If "p" is in the space, returns the address of the start of the 178 // "block" that contains "p". We say "block" instead of "object" since 179 // some heaps may not pack objects densely; a chunk may either be an 180 // object or a non-object. If "p" is not in the space, return null. 181 virtual HeapWord* block_start_const(const void* p) const = 0; 182 183 // The non-const version may have benevolent side effects on the data 184 // structure supporting these calls, possibly speeding up future calls. 185 // The default implementation, however, is simply to call the const 186 // version. 187 virtual HeapWord* block_start(const void* p); 188 189 // Requires "addr" to be the start of a chunk, and returns its size. 190 // "addr + size" is required to be the start of a new chunk, or the end 191 // of the active area of the heap. 192 virtual size_t block_size(const HeapWord* addr) const = 0; 193 194 // Requires "addr" to be the start of a block, and returns "TRUE" iff 195 // the block is an object. 196 virtual bool block_is_obj(const HeapWord* addr) const = 0; 197 198 // Requires "addr" to be the start of a block, and returns "TRUE" iff 199 // the block is an object and the object is alive. 200 virtual bool obj_is_alive(const HeapWord* addr) const; 201 202 // Allocation (return null if full). Assumes the caller has established 203 // mutually exclusive access to the space. 204 virtual HeapWord* allocate(size_t word_size) = 0; 205 206 // Allocation (return null if full). Enforces mutual exclusion internally. 207 virtual HeapWord* par_allocate(size_t word_size) = 0; 208 209 #if INCLUDE_SERIALGC 210 // Mark-sweep-compact support: all spaces can update pointers to objects 211 // moving as a part of compaction. 212 virtual void adjust_pointers() = 0; 213 #endif 214 215 virtual void print() const; 216 virtual void print_on(outputStream* st) const; 217 virtual void print_short() const; 218 virtual void print_short_on(outputStream* st) const; 219 220 221 // IF "this" is a ContiguousSpace, return it, else return null. 222 virtual ContiguousSpace* toContiguousSpace() { 223 return nullptr; 224 } 225 226 // Debugging 227 virtual void verify() const = 0; 228 }; 229 230 // A dirty card to oop closure for contiguous spaces (ContiguousSpace and 231 // sub-classes). It knows how to filter out objects that are outside of the 232 // _boundary. 233 // (Note that because of the imprecise nature of the write barrier, this may 234 // iterate over oops beyond the region.) 235 // 236 // Assumptions: 237 // 1. That the actual top of any area in a memory region 238 // contained by the space is bounded by the end of the contiguous 239 // region of the space. 240 // 2. That the space is really made up of objects and not just 241 // blocks. 242 243 class DirtyCardToOopClosure: public MemRegionClosure { 244 protected: 245 OopIterateClosure* _cl; 246 Space* _sp; 247 HeapWord* _min_done; // Need a downwards traversal to compensate 248 // imprecise write barrier; this is the 249 // lowest location already done (or, 250 // alternatively, the lowest address that 251 // shouldn't be done again. null means infinity.) 252 NOT_PRODUCT(HeapWord* _last_bottom;) 253 254 // Get the actual top of the area on which the closure will 255 // operate, given where the top is assumed to be (the end of the 256 // memory region passed to do_MemRegion) and where the object 257 // at the top is assumed to start. For example, an object may 258 // start at the top but actually extend past the assumed top, 259 // in which case the top becomes the end of the object. 260 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj); 261 262 // Walk the given memory region from bottom to (actual) top 263 // looking for objects and applying the oop closure (_cl) to 264 // them. The base implementation of this treats the area as 265 // blocks, where a block may or may not be an object. Sub- 266 // classes should override this to provide more accurate 267 // or possibly more efficient walking. 268 void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top); 269 270 // Walk the given memory region, from bottom to top, applying 271 // the given oop closure to (possibly) all objects found. The 272 // given oop closure may or may not be the same as the oop 273 // closure with which this closure was created, as it may 274 // be a filtering closure which makes use of the _boundary. 275 // We offer two signatures, so the FilteringClosure static type is 276 // apparent. 277 void walk_mem_region_with_cl(MemRegion mr, 278 HeapWord* bottom, HeapWord* top, 279 OopIterateClosure* cl); 280 public: 281 DirtyCardToOopClosure(Space* sp, OopIterateClosure* cl) : 282 _cl(cl), _sp(sp), _min_done(nullptr) { 283 NOT_PRODUCT(_last_bottom = nullptr); 284 } 285 286 void do_MemRegion(MemRegion mr) override; 287 }; 288 289 // A structure to represent a point at which objects are being copied 290 // during compaction. 291 class CompactPoint : public StackObj { 292 public: 293 Generation* gen; 294 CompactibleSpace* space; 295 296 CompactPoint(Generation* g = nullptr) : 297 gen(g), space(nullptr) {} 298 }; 299 300 // A space that supports compaction operations. This is usually, but not 301 // necessarily, a space that is normally contiguous. But, for example, a 302 // free-list-based space whose normal collection is a mark-sweep without 303 // compaction could still support compaction in full GC's. 304 class CompactibleSpace: public Space { 305 friend class VMStructs; 306 private: 307 HeapWord* _compaction_top; 308 CompactibleSpace* _next_compaction_space; 309 310 template <class SpaceType> 311 static inline void verify_up_to_first_dead(SpaceType* space) NOT_DEBUG_RETURN; 312 313 template <class SpaceType> 314 static inline void clear_empty_region(SpaceType* space); 315 316 public: 317 CompactibleSpace() : 318 _compaction_top(nullptr), _next_compaction_space(nullptr) {} 319 320 void initialize(MemRegion mr, bool clear_space, bool mangle_space) override; 321 void clear(bool mangle_space) override; 322 323 // Used temporarily during a compaction phase to hold the value 324 // top should have when compaction is complete. 325 HeapWord* compaction_top() const { return _compaction_top; } 326 327 void set_compaction_top(HeapWord* value) { 328 assert(value == nullptr || (value >= bottom() && value <= end()), 329 "should point inside space"); 330 _compaction_top = value; 331 } 332 333 // Perform operations on the space needed after a compaction 334 // has been performed. 335 virtual void reset_after_compaction() = 0; 336 337 // Returns the next space (in the current generation) to be compacted in 338 // the global compaction order. Also is used to select the next 339 // space into which to compact. 340 341 virtual CompactibleSpace* next_compaction_space() const { 342 return _next_compaction_space; 343 } 344 345 void set_next_compaction_space(CompactibleSpace* csp) { 346 _next_compaction_space = csp; 347 } 348 349 #if INCLUDE_SERIALGC 350 // MarkSweep support phase2 351 352 // Start the process of compaction of the current space: compute 353 // post-compaction addresses, and insert forwarding pointers. The fields 354 // "cp->gen" and "cp->compaction_space" are the generation and space into 355 // which we are currently compacting. This call updates "cp" as necessary, 356 // and leaves the "compaction_top" of the final value of 357 // "cp->compaction_space" up-to-date. Offset tables may be updated in 358 // this phase as if the final copy had occurred; if so, "cp->threshold" 359 // indicates when the next such action should be taken. 360 virtual void prepare_for_compaction(CompactPoint* cp) = 0; 361 // MarkSweep support phase3 362 void adjust_pointers() override; 363 // MarkSweep support phase4 364 virtual void compact(); 365 #endif // INCLUDE_SERIALGC 366 367 // The maximum percentage of objects that can be dead in the compacted 368 // live part of a compacted space ("deadwood" support.) 369 virtual size_t allowed_dead_ratio() const { return 0; }; 370 371 // Some contiguous spaces may maintain some data structures that should 372 // be updated whenever an allocation crosses a boundary. This function 373 // initializes these data structures for further updates. 374 virtual void initialize_threshold() { } 375 376 // "q" is an object of the given "size" that should be forwarded; 377 // "cp" names the generation ("gen") and containing "this" (which must 378 // also equal "cp->space"). "compact_top" is where in "this" the 379 // next object should be forwarded to. If there is room in "this" for 380 // the object, insert an appropriate forwarding pointer in "q". 381 // If not, go to the next compaction space (there must 382 // be one, since compaction must succeed -- we go to the first space of 383 // the previous generation if necessary, updating "cp"), reset compact_top 384 // and then forward. In either case, returns the new value of "compact_top". 385 // Invokes the "alloc_block" function of the then-current compaction 386 // space. 387 virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp, 388 HeapWord* compact_top, SlidingForwarding* const forwarding); 389 protected: 390 // Used during compaction. 391 HeapWord* _first_dead; 392 HeapWord* _end_of_live; 393 394 // This the function to invoke when an allocation of an object covering 395 // "start" to "end" occurs to update other internal data structures. 396 virtual void alloc_block(HeapWord* start, HeapWord* the_end) { } 397 }; 398 399 class GenSpaceMangler; 400 401 // A space in which the free area is contiguous. It therefore supports 402 // faster allocation, and compaction. 403 class ContiguousSpace: public CompactibleSpace { 404 friend class VMStructs; 405 406 protected: 407 HeapWord* _top; 408 // A helper for mangling the unused area of the space in debug builds. 409 GenSpaceMangler* _mangler; 410 411 GenSpaceMangler* mangler() { return _mangler; } 412 413 // Allocation helpers (return null if full). 414 inline HeapWord* allocate_impl(size_t word_size); 415 inline HeapWord* par_allocate_impl(size_t word_size); 416 417 public: 418 ContiguousSpace(); 419 ~ContiguousSpace(); 420 421 void initialize(MemRegion mr, bool clear_space, bool mangle_space) override; 422 void clear(bool mangle_space) override; 423 424 // Accessors 425 HeapWord* top() const { return _top; } 426 void set_top(HeapWord* value) { _top = value; } 427 428 void set_saved_mark() { _saved_mark_word = top(); } 429 430 bool saved_mark_at_top() const { return saved_mark_word() == top(); } 431 432 // In debug mode mangle (write it with a particular bit 433 // pattern) the unused part of a space. 434 435 // Used to save the address in a space for later use during mangling. 436 void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN; 437 // Used to save the space's current top for later use during mangling. 438 void set_top_for_allocations() PRODUCT_RETURN; 439 440 // Mangle regions in the space from the current top up to the 441 // previously mangled part of the space. 442 void mangle_unused_area() override PRODUCT_RETURN; 443 // Mangle [top, end) 444 void mangle_unused_area_complete() override PRODUCT_RETURN; 445 446 // Do some sparse checking on the area that should have been mangled. 447 void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN; 448 // Check the complete area that should have been mangled. 449 // This code may be null depending on the macro DEBUG_MANGLING. 450 void check_mangled_unused_area_complete() PRODUCT_RETURN; 451 452 // Size computations: sizes in bytes. 453 size_t used() const override { return byte_size(bottom(), top()); } 454 size_t free() const override { return byte_size(top(), end()); } 455 456 bool is_free_block(const HeapWord* p) const override; 457 458 // In a contiguous space we have a more obvious bound on what parts 459 // contain objects. 460 MemRegion used_region() const override { return MemRegion(bottom(), top()); } 461 462 // Allocation (return null if full) 463 HeapWord* allocate(size_t word_size) override; 464 HeapWord* par_allocate(size_t word_size) override; 465 466 // Iteration 467 void oop_iterate(OopIterateClosure* cl) override; 468 void object_iterate(ObjectClosure* blk) override; 469 470 // Compaction support 471 void reset_after_compaction() override { 472 assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space"); 473 set_top(compaction_top()); 474 } 475 476 // Apply "blk->do_oop" to the addresses of all reference fields in objects 477 // starting with the _saved_mark_word, which was noted during a generation's 478 // save_marks and is required to denote the head of an object. 479 // Fields in objects allocated by applications of the closure 480 // *are* included in the iteration. 481 // Updates _saved_mark_word to point to just after the last object 482 // iterated over. 483 template <typename OopClosureType> 484 void oop_since_save_marks_iterate(OopClosureType* blk); 485 486 // Same as object_iterate, but starting from "mark", which is required 487 // to denote the start of an object. Objects allocated by 488 // applications of the closure *are* included in the iteration. 489 virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk); 490 491 // Very inefficient implementation. 492 HeapWord* block_start_const(const void* p) const override; 493 size_t block_size(const HeapWord* p) const override; 494 // If a block is in the allocated area, it is an object. 495 bool block_is_obj(const HeapWord* p) const override { return p < top(); } 496 497 // Addresses for inlined allocation 498 HeapWord** top_addr() { return &_top; } 499 HeapWord** end_addr() { return &_end; } 500 501 #if INCLUDE_SERIALGC 502 // Overrides for more efficient compaction support. 503 void prepare_for_compaction(CompactPoint* cp) override; 504 #endif 505 506 void print_on(outputStream* st) const override; 507 508 // Checked dynamic downcasts. 509 ContiguousSpace* toContiguousSpace() override { 510 return this; 511 } 512 513 // Debugging 514 void verify() const override; 515 }; 516 517 #if INCLUDE_SERIALGC 518 519 // Class TenuredSpace is used by TenuredGeneration; it supports an efficient 520 // "block_start" operation via a BlockOffsetArray (whose BlockOffsetSharedArray 521 // may be shared with other spaces.) 522 523 class TenuredSpace: public ContiguousSpace { 524 friend class VMStructs; 525 protected: 526 BlockOffsetArrayContigSpace _offsets; 527 Mutex _par_alloc_lock; 528 529 // Mark sweep support 530 size_t allowed_dead_ratio() const override; 531 public: 532 // Constructor 533 TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray, 534 MemRegion mr); 535 536 void set_bottom(HeapWord* value) override; 537 void set_end(HeapWord* value) override; 538 539 void clear(bool mangle_space) override; 540 541 inline HeapWord* block_start_const(const void* p) const override; 542 543 // Add offset table update. 544 inline HeapWord* allocate(size_t word_size) override; 545 inline HeapWord* par_allocate(size_t word_size) override; 546 547 // MarkSweep support phase3 548 void initialize_threshold() override; 549 void alloc_block(HeapWord* start, HeapWord* end) override; 550 551 void print_on(outputStream* st) const override; 552 553 // Debugging 554 void verify() const override; 555 }; 556 #endif //INCLUDE_SERIALGC 557 558 #endif // SHARE_GC_SHARED_SPACE_HPP