/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SPACE_HPP
#define SHARE_GC_SHARED_SPACE_HPP

#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "oops/markWord.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Forward decls.
class Space;
class ContiguousSpace;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class CardTableRS;
class DirtyCardToOopClosure;
class SlidingForwarding;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// A Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  Space():
    _bottom(NULL), _end(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const { return _bottom; }
  HeapWord* end() const    { return _end; }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return cast_from_oop<HeapWord*>(obj) >= saved_mark_word();
  }

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosures specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the MemRegion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }
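
  // For illustration (an informal note, not part of the interface): in a
  // contiguous space, which allocates upwards, a generation's save_marks()
  // records the current allocation boundary in _saved_mark_word.  Then
  //
  //   used_region_at_save_marks() == MemRegion(bottom(), saved_mark_word())
  //
  // covers exactly the objects allocated before the call, and the address
  // comparison in obj_allocated_since_save_marks() identifies every object
  // allocated after it.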

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose.  The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // We also call this when expanding a space to satisfy an allocation
  // request. See bug #4668531
  virtual void mangle_unused_area() = 0;
  virtual void mangle_unused_area_complete() = 0;

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the given space contains the
  // given address as part of an allocated object.  For
  // certain kinds of spaces, this might be a potentially
  // expensive operation.  To prevent performance problems
  // on account of its inadvertent use in product JVMs,
  // we restrict its use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }
  bool is_in(oop obj) const {
    return is_in((void*)obj);
  }

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ::is_aligned(p, sizeof(double));
  }

  // Size computations.  Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each.  Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(OopIterateClosure* cl);

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
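
  // For illustration (a hedged sketch, not part of this interface): a typical
  // client of object_iterate() is a small ObjectClosure subclass, e.g. one
  // that counts the objects in the space:
  //
  //   class CountObjectsClosure : public ObjectClosure {
  //     size_t _count;
  //    public:
  //     CountObjectsClosure() : _count(0) {}
  //     void do_object(oop obj) { _count++; }
  //     size_t count() const    { return _count; }
  //   };
  //
  //   CountObjectsClosure blk;
  //   space->object_iterate(&blk);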

  // Create and return a new dirty card to oop closure.  Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate.  ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
                                             CardTable::PrecisionStyle precision,
                                             HeapWord* boundary);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

#if INCLUDE_SERIALGC
  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers() = 0;
#endif

  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};
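
// A minimal sketch (illustrative only, not part of the original interface)
// of how the block contracts above compose: starting from bottom(),
// block_size() always advances to the start of the next block, and
// block_is_obj() tells objects apart from free or filler blocks.  Assumes
// the space is parsable (e.g. the caller is at a safepoint).
inline size_t count_object_words(const Space* sp) {
  size_t obj_words = 0;
  HeapWord* cur = sp->bottom();
  HeapWord* limit = sp->used_region().end();
  while (cur < limit) {
    size_t size = sp->block_size(cur);  // size of this block, in words
    if (sp->block_is_obj(cur)) {
      obj_words += size;                // the block holds an object
    }
    cur += size;                        // step to the next block
  }
  return obj_words;
}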

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty.  (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types.  See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  OopIterateClosure* _cl;
  Space* _sp;
  CardTable::PrecisionStyle _precision;
  HeapWord* _boundary;    // If non-NULL, process only non-NULL oops
                          // pointing below boundary.
  HeapWord* _min_done;    // ObjHeadPreciseArray precision requires
                          // a downwards traversal; this is the
                          // lowest location already done (or,
                          // alternatively, the lowest address that
                          // shouldn't be done again.  NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start.  For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them.  The base implementation of this treats the area as
  // blocks, where a block may or may not be an object.  Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, OopIterateClosure* cl,
                        CardTable::PrecisionStyle precision,
                        HeapWord* boundary) :
    _cl(cl), _sp(sp), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
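
// For illustration (a hedged sketch; the actual call sites live in the card
// table / remembered set code, not in this file): a space hands out a closure
// of the appropriate subtype via new_dcto_cl(), and the caller then applies
// it to each dirty region found in the card table:
//
//   DirtyCardToOopClosure* dcto_cl =
//       sp->new_dcto_cl(oop_cl, CardTable::ObjHeadPreciseArray, boundary);
//   dcto_cl->do_MemRegion(dirty_region);   // one span of dirty cards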

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;

  CompactPoint(Generation* g = NULL) :
    gen(g), space(NULL), threshold(0) {}
};

// A space that supports compaction operations.  This is usually, but not
// necessarily, a space that is normally contiguous.  But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.
//
// The compaction operations are implemented by the
// scan_and_{adjust_pointers,compact,forward} function templates.
// The following non-virtual auxiliary functions are used by these function templates:
// - scan_limit()
// - scanned_block_is_obj()
// - scanned_block_size()
// - adjust_obj_size()
// - obj_size()
// These functions are to be used exclusively by the scan_and_* function templates,
// and must be defined for all (non-abstract) subclasses of CompactibleSpace.
//
// NOTE: Any subclasses of CompactibleSpace wanting to change/define the behavior
// in any of the auxiliary functions must also override the corresponding
// prepare_for_compaction/adjust_pointers/compact functions using them.
// Otherwise, such changes will not be used by, and will have no effect on,
// the compaction operations.
//
// This translates to the following dependencies:
// Overrides/definitions of
//  - scan_limit
//  - scanned_block_is_obj
//  - scanned_block_size
// require override/definition of prepare_for_compaction().
// Similar dependencies exist between
//  - adjust_obj_size and adjust_pointers()
//  - obj_size        and compact().
//
// Additionally, this also means that changes to block_size() or block_is_obj() that
// should be effective during the compaction operations must provide a corresponding
// definition of scanned_block_size/scanned_block_is_obj respectively.
// (A sketch of this wiring pattern follows the class.)
class CompactibleSpace: public Space {
  friend class VMStructs;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
  inline size_t adjust_obj_size(size_t size) const {
    return size;
  }

  inline size_t obj_size(const HeapWord* addr) const;

  template <class SpaceType>
  static inline void verify_up_to_first_dead(SpaceType* space) NOT_DEBUG_RETURN;

  template <class SpaceType>
  static inline void clear_empty_region(SpaceType* space);

 public:
  CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
           "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() = 0;

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

#if INCLUDE_SERIALGC
  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->space" are the generation and space into
  // which we are currently compacting.  This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->space" up-to-date.  Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp) = 0;
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();
#endif // INCLUDE_SERIALGC

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary.  This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and containing "this" (which must
  // also equal "cp->space").  "compact_top" is where in "this" the
  // next object should be forwarded to.  If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward.  In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates "cp->threshold"
  // accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top, SlidingForwarding* const forwarding);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

  void set_first_dead(HeapWord* value) { _first_dead = value; }
  void set_end_of_live(HeapWord* value) { _end_of_live = value; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; it returns the next
  // threshold.  (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
  // The space argument should be a subclass of CompactibleSpace, implementing
  // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
  // and possibly also overriding obj_size(), and adjust_obj_size().
  // These functions should avoid virtual calls whenever possible.

#if INCLUDE_SERIALGC
  // Frequently calls adjust_obj_size().
  template <class SpaceType>
  static inline void scan_and_adjust_pointers(SpaceType* space);
#endif

  // Frequently calls obj_size().
  template <class SpaceType>
  static inline void scan_and_compact(SpaceType* space);

  // Frequently calls scanned_block_is_obj() and scanned_block_size().
  // Requires the scan_limit() function.
  template <class SpaceType>
  static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
};
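
// For illustration, a hedged sketch of the wiring pattern described above
// (not a definitive implementation; "MySpace" and its members are
// hypothetical): a concrete subclass defines the auxiliary functions, grants
// the template access to them, and overrides the corresponding phase function
// so the template is instantiated with those definitions.  ContiguousSpace,
// below, follows exactly this pattern.
//
//   class MySpace : public CompactibleSpace {
//     template <typename SpaceType>
//     friend void CompactibleSpace::scan_and_forward(SpaceType*, CompactPoint*);
//
//     // Auxiliary functions consumed by scan_and_forward():
//     HeapWord* scan_limit() const { return top(); }   // "top()" is MySpace's own
//     bool scanned_block_is_obj(const HeapWord*) const { return true; }
//     size_t scanned_block_size(const HeapWord* p) const;
//    public:
//     void prepare_for_compaction(CompactPoint* cp) {
//       scan_and_forward(this, cp);   // instantiated with the helpers above
//     }
//   };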

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class VMStructs;
  // Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

 private:
  // Auxiliary functions for scan_and_forward support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return top();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return true; // Always true, since scan_limit is top
  }

  inline size_t scanned_block_size(const HeapWord* addr) const;

 protected:
  HeapWord* _top;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size);
  inline HeapWord* par_allocate_impl(size_t word_size);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  void set_saved_mark()   { _saved_mark_word = top(); }
  void reset_saved_mark() { _saved_mark_word = bottom(); }

  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This check may be a no-op depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(), end()); }

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);
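
  // For illustration (an informal note, not part of the interface): callers
  // that have not established exclusive access to the space must use the
  // internally synchronized entry point and be prepared for failure:
  //
  //   HeapWord* mem = cs->par_allocate(word_size);
  //   if (mem == NULL) {
  //     // Space is full: the caller must expand the space or trigger a GC.
  //   }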

  // Iteration
  void oop_iterate(OopIterateClosure* cl);
  void object_iterate(ObjectClosure* blk);

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
  }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
                                     CardTable::PrecisionStyle precision,
                                     HeapWord* boundary);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* blk);

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

#if INCLUDE_SERIALGC
  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);
#endif

  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency.  "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);
};
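
// For illustration (a hedged sketch; the real driver lives in the generation
// code, not in this file): because oop_since_save_marks_iterate() advances
// _saved_mark_word and picks up objects allocated by the closure itself, the
// typical caller loops until the saved mark catches up with top():
//
//   template <typename OopClosureType>
//   void scan_new_objects(ContiguousSpace* cs, OopClosureType* cl) {
//     while (!cs->saved_mark_at_top()) {
//       cs->oop_since_save_marks_iterate(cl);
//     }
//   }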

// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class FilteringDCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found.  The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopIterateClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  FilteringDCTOC(Space* sp, OopIterateClosure* cl,
                 CardTable::PrecisionStyle precision,
                 HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringDCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public FilteringDCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopIterateClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, OopIterateClosure* cl,
                       CardTable::PrecisionStyle precision,
                       HeapWord* boundary) :
    FilteringDCTOC(sp, cl, precision, boundary)
  {}
};

// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.)  This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};

// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_GC_SHARED_SPACE_HPP