/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SPACE_HPP
#define SHARE_GC_SHARED_SPACE_HPP

#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "oops/markWord.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Forward decls.
class Space;
class ContiguousSpace;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class CardTableRS;
class DirtyCardToOopClosure;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.
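//
// Pictorially (an illustrative sketch only; top() itself is introduced by
// the contiguous subclasses below):
//
//   bottom()            top()               end()
//     |     allocated     |       free        |
//     +-------------------+-------------------+
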
class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  Space():
    _bottom(NULL), _end(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const { return _bottom; }
  HeapWord* end() const    { return _end; }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return cast_from_oop<HeapWord*>(obj) >= saved_mark_word();
  }

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosures specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the MemRegion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
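
  // For example (an illustrative sketch only; the SpaceDecorator flags are
  // defined in gc/shared/spaceDecorator.hpp), a freshly reserved region is
  // typically set up with:
  //
  //   space->initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);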

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // We also call this when expanding a space to satisfy an allocation
  // request. See bug #4668531
  virtual void mangle_unused_area() = 0;
  virtual void mangle_unused_area_complete() = 0;

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the space contains the given address as
  // part of an allocated object. For certain kinds of spaces,
  // this might be a potentially expensive operation. To prevent
  // performance problems on account of its inadvertent use in
  // product jvm's, we restrict its use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }
  bool is_in(oop obj) const {
    return is_in((void*)obj);
  }

  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ::is_aligned(p, sizeof(double));
  }

  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(OopIterateClosure* cl);

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
                                             CardTable::PrecisionStyle precision,
                                             HeapWord* boundary);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
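
  // For example (an illustrative sketch only): a single-threaded caller
  // that already holds the relevant lock would use allocate(), while
  // racing mutator threads would use par_allocate():
  //
  //   HeapWord* obj = sp->par_allocate(word_size);
  //   if (obj == NULL) {
  //     // Space is full: the caller must expand the space or trigger a GC.
  //   }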

#if INCLUDE_SERIALGC
  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers() = 0;
#endif

  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  OopIterateClosure* _cl;
  Space* _sp;
  CardTable::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again). NULL means infinity.
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, OopIterateClosure* cl,
                        CardTable::PrecisionStyle precision,
                        HeapWord* boundary) :
    _cl(cl), _sp(sp), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
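
// For example (an illustrative sketch only): a card-table scan typically
// asks the space for the right kind of closure and applies it to each run
// of dirty cards:
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(&oop_cl, CardTable::ObjHeadPreciseArray, boundary);
//   dcto_cl->do_MemRegion(dirty_region);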

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;

  CompactPoint(Generation* g = NULL) :
    gen(g), space(NULL), threshold(0) {}
};
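
// Illustrative flow (a sketch, not the exact collector code): a single
// CompactPoint is threaded through the spaces of a generation in
// compaction order:
//
//   CompactPoint cp(the_gen);
//   for (CompactibleSpace* s = first_space; s != NULL;
//        s = s->next_compaction_space()) {
//     s->prepare_for_compaction(&cp);
//   }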

// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.
//
// The compaction operations are implemented by the
// scan_and_{adjust_pointers,compact,forward} function templates.
// The following non-virtual auxiliary functions are used by these function templates:
// - scan_limit()
// - scanned_block_is_obj()
// - scanned_block_size()
// - adjust_obj_size()
// - obj_size()
// These functions are to be used exclusively by the scan_and_* function templates,
// and must be defined for all (non-abstract) subclasses of CompactibleSpace.
//
// NOTE: Any subclasses of CompactibleSpace wanting to change/define the behavior
// in any of the auxiliary functions must also override the corresponding
// prepare_for_compaction/adjust_pointers/compact functions using them.
// If not, such changes will not be used or will have no effect on the compaction operations.
//
// This translates to the following dependencies:
// Overrides/definitions of
//  - scan_limit
//  - scanned_block_is_obj
//  - scanned_block_size
// require an override/definition of prepare_for_compaction().
// Similar dependencies exist between
//  - adjust_obj_size and adjust_pointers()
//  - obj_size        and compact().
//
// Additionally, this also means that changes to block_size() or block_is_obj() that
// should be effective during the compaction operations must provide a corresponding
// definition of scanned_block_size/scanned_block_is_obj respectively.
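//
// For example (an illustrative sketch; "MySpace" is hypothetical): a
// subclass that redefines scanned_block_size() must also override
// prepare_for_compaction() so that scan_and_forward() is re-instantiated
// with the subclass type and picks up the new definition statically:
//
//   void MySpace::prepare_for_compaction(CompactPoint* cp) {
//     scan_and_forward(this, cp);  // uses MySpace::scanned_block_size()
//   }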
class CompactibleSpace: public Space {
  friend class VMStructs;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
  inline size_t adjust_obj_size(size_t size) const {
    return size;
  }

  inline size_t obj_size(const HeapWord* addr) const;

  template <class SpaceType>
  static inline void verify_up_to_first_dead(SpaceType* space) NOT_DEBUG_RETURN;

  template <class SpaceType>
  static inline void clear_empty_region(SpaceType* space);

 public:
  CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
           "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() = 0;

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. It is also used to select the next
  // space into which to compact.
  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

#if INCLUDE_SERIALGC
  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final "cp->compaction_space"
  // up-to-date. Offset tables may be updated in this phase as if the final
  // copy had occurred; if so, "cp->threshold" indicates when the next such
  // action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp) = 0;
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();
#endif // INCLUDE_SERIALGC

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);
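
  // Illustrative use (a sketch of how scan_and_forward() drives this):
  //
  //   compact_top = cp->space->forward(oop(q), size, cp, compact_top);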

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

  void set_first_dead(HeapWord* value) { _first_dead = value; }
  void set_end_of_live(HeapWord* value) { _end_of_live = value; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; it returns the next
  // threshold. (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
  // The space argument should be a subclass of CompactibleSpace, implementing
  // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
  // and possibly also overriding obj_size() and adjust_obj_size().
  // These functions should avoid virtual calls whenever possible.

#if INCLUDE_SERIALGC
  // Frequently calls adjust_obj_size().
  template <class SpaceType>
  static inline void scan_and_adjust_pointers(SpaceType* space);
#endif

  // Frequently calls obj_size().
  template <class SpaceType>
  static inline void scan_and_compact(SpaceType* space);

  // Frequently calls scanned_block_is_obj() and scanned_block_size().
  // Requires the scan_limit() function.
  template <class SpaceType>
  static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
};

class GenSpaceMangler;

// A space in which the free area is contiguous. It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class VMStructs;
  // Allow scan_and_forward to call (private) overrides of the auxiliary functions on this class.
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

 private:
  // Auxiliary functions for scan_and_forward support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return top();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return true; // Always true, since scan_limit is top.
  }

  inline size_t scanned_block_size(const HeapWord* addr) const;

 protected:
  HeapWord* _top;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size);
  inline HeapWord* par_allocate_impl(size_t word_size);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const         { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  void set_saved_mark()   { _saved_mark_word = top(); }
  void reset_saved_mark() { _saved_mark_word = bottom(); }

  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode, mangle (write a particular bit pattern into)
  // the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(), end()); }

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  // Iteration
  void oop_iterate(OopIterateClosure* cl);
  void object_iterate(ObjectClosure* blk);

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
  }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
                                     CardTable::PrecisionStyle precision,
                                     HeapWord* boundary);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* blk);
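
  // A typical sequence (an illustrative sketch only):
  //
  //   sp->set_saved_mark();                    // remember the current top
  //   ... objects are allocated in sp ...
  //   sp->oop_since_save_marks_iterate(&cl);   // visits only the new objects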

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }
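
  // These addresses let callers such as the compilers' inlined fast paths
  // bump-allocate directly. A sketch (assuming exclusive access, or an
  // atomic compare-and-exchange on the top pointer):
  //
  //   HeapWord* old_top = *sp->top_addr();
  //   if (old_top + word_size <= *sp->end_addr()) {
  //     // advance top by word_size and use [old_top, old_top + word_size)
  //   }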

#if INCLUDE_SERIALGC
  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);
#endif

  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency. "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);
};

// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class FilteringDCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopIterateClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  FilteringDCTOC(Space* sp, OopIterateClosure* cl,
                 CardTable::PrecisionStyle precision,
                 HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringDCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public FilteringDCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopIterateClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, OopIterateClosure* cl,
                       CardTable::PrecisionStyle precision,
                       HeapWord* boundary) :
    FilteringDCTOC(sp, cl, precision, boundary)
  {}
};

// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);
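
  // Conceptually (an illustrative sketch; the real definitions live in the
  // corresponding .inline.hpp file), allocation here also keeps the block
  // offset table in sync so that block_start_const() stays cheap:
  //
  //   HeapWord* res = ContiguousSpace::allocate(size);
  //   if (res != NULL) {
  //     _offsets.alloc_block(res, size);  // record the new block's start
  //   }
  //   return res;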

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};

// Class TenuredSpace is used by TenuredGeneration.

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};

#endif // SHARE_GC_SHARED_SPACE_HPP