/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SPACE_HPP
#define SHARE_GC_SHARED_SPACE_HPP

#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "oops/markWord.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/serialBlockOffsetTable.hpp"
#endif

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Forward decls.
class Space;
class ContiguousSpace;
class Generation;
class CardTableRS;
class DirtyCardToOopClosure;
class FilteringClosure;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.
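//
// Illustrative layout (a sketch that follows from the invariant above, not
// additional API): objects are allocated from bottom() upwards, so the used
// part of the space lies below top() and the remainder up to end() is free:
//
//   bottom()            top()                 end()
//     |   allocated objs   |     free space     |
//     v                    v                    v
//     +--------------------+--------------------+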

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  Space():
    _bottom(nullptr), _end(nullptr) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  HeapWord* saved_mark_word() const  { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if the given object has been allocated since a
  // generation's "save_marks" call.
  bool obj_allocated_since_save_marks(const oop obj) const {
    return cast_from_oop<HeapWord*>(obj) >= saved_mark_word();
  }
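  // For example, ContiguousSpace::set_saved_mark() (below) records top() as
  // the saved mark, so any object at or above that address must have been
  // allocated after the mark was taken.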

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosures specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GCs.
  // We also call this when expanding a space to satisfy an allocation
  // request. See bug #4668531
  virtual void mangle_unused_area() = 0;
  virtual void mangle_unused_area_complete() = 0;

  // Testers
  bool is_empty() const              { return used() == 0; }

  // Returns true iff the space contains the given address as part
  // of an allocated object. For certain kinds of spaces, this might
  // be a potentially expensive operation. To prevent performance
  // problems on account of its inadvertent use in product JVMs, we
  // restrict its use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }
  bool is_in(oop obj) const {
    return is_in((void*)obj);
  }

  // Returns true iff the space's reserved memory contains the given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ::is_aligned(p, sizeof(double));
  }

  // Size computations.  Sizes are in bytes.
  size_t capacity()     const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return null.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return null if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return null if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
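  // Illustrative use (a sketch, not part of the interface contract):
  // callers that already hold the necessary lock use allocate(), while
  // concurrent callers use par_allocate(), which the contiguous-space
  // implementation typically services with an atomic compare-and-swap
  // on top(), e.g.
  //
  //   HeapWord* mem = space->par_allocate(word_size);
  //   if (mem == nullptr) {
  //     // space is full: expand it, collect, or try another space
  //   }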

#if INCLUDE_SERIALGC
  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers() = 0;
#endif

  void print() const;
  virtual void print_on(outputStream* st) const;
  void print_short() const;
  void print_short_on(outputStream* st) const;
};

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
public:
  Generation* gen;
  ContiguousSpace* space;

  CompactPoint(Generation* g = nullptr) :
    gen(g), space(nullptr) {}
};

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public Space {
  friend class VMStructs;

private:
  HeapWord* _compaction_top;
  ContiguousSpace* _next_compaction_space;

  static inline void verify_up_to_first_dead(ContiguousSpace* space) NOT_DEBUG_RETURN;

  static inline void clear_empty_region(ContiguousSpace* space);

#if INCLUDE_SERIALGC
  template <bool ALT_FWD>
  void prepare_for_compaction_impl(CompactPoint* cp);

  template <bool ALT_FWD>
  void adjust_pointers_impl();

  template <bool ALT_FWD>
  void compact_impl();
#endif

protected:
  HeapWord* _top;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // This is the function to invoke when an allocation of an object covering
  // "start" to "end" occurs, to update other internal data structures.
  virtual void update_for_block(HeapWord* start, HeapWord* the_end) { }
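  // (For example, TenuredSpace below overrides this hook to keep its block
  // offset table in sync with newly covered blocks.)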

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return null if full).
  inline HeapWord* allocate_impl(size_t word_size);
  inline HeapWord* par_allocate_impl(size_t word_size);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose.  The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  void initialize(MemRegion mr, bool clear_space, bool mangle_space);
265 
266   // The "clear" method must be called on a region that may have
267   // had allocation performed in it, but is now to be considered empty.
268   virtual void clear(bool mangle_space);
269 
270   // Used temporarily during a compaction phase to hold the value
271   // top should have when compaction is complete.
272   HeapWord* compaction_top() const { return _compaction_top;    }
273 
274   void set_compaction_top(HeapWord* value) {
275     assert(value == nullptr || (value >= bottom() && value <= end()),
276       "should point inside space");
277     _compaction_top = value;
278   }
279 
  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  It is also used to select the next
  // space into which to compact.

  virtual ContiguousSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(ContiguousSpace* csp) {
    _next_compaction_space = csp;
  }

#if INCLUDE_SERIALGC
  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->space" are the generation and space into which we
  // are currently compacting.  This call updates "cp" as necessary, and
  // leaves the "compaction_top" of the final value of "cp->space"
  // up-to-date.  Offset tables may be updated in this phase as if the
  // final copy had occurred.
  void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  void adjust_pointers() override;
  // MarkSweep support phase4
  virtual void compact();
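  // Illustrative call order during a serial full GC (a sketch inferred from
  // the phase comments above, not a normative API):
  //
  //   space->prepare_for_compaction(&cp);   // phase2: compute new addresses
  //   space->adjust_pointers();             // phase3: fix up references
  //   space->compact();                     // phase4: move the objects
  //   space->reset_after_compaction();      // finally, top() := compaction_top()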
#endif // INCLUDE_SERIALGC

  // The maximum percentage of objects that can be dead in the live part
  // of a compacted space ("deadwood" support).
  virtual size_t allowed_dead_ratio() const { return 0; }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and the containing space ("space",
  // which must equal "this").  "compact_top" is where in "this" the
  // next object should be forwarded to.  If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward.  In either case, returns the new value of "compact_top".
  // Invokes the "update_for_block" function of the then-current compaction
  // space.
  template <bool ALT_FWD>
  HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                    HeapWord* compact_top);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  void set_saved_mark()            { _saved_mark_word = top();    }

  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug builds, mangle (overwrite with a recognizable bit pattern)
  // the unused part of the space.

  // Used to save an address in the space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() override PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() override PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This check may be compiled out depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t used() const override   { return byte_size(bottom(), top()); }
  size_t free() const override   { return byte_size(top(),    end()); }
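  // Note that the invariant bottom() <= top() <= end() implies
  // used() + free() == capacity() for a contiguous space.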

  bool is_free_block(const HeapWord* p) const override;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const override { return MemRegion(bottom(), top()); }

  // Allocation (return null if full)
  HeapWord* allocate(size_t word_size) override;
  HeapWord* par_allocate(size_t word_size) override;

  // Iteration
  void object_iterate(ObjectClosure* blk) override;

  // Compaction support
  void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
  }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* blk);
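  // Illustrative use (a sketch; "ScanClosure" stands in for whatever
  // OopClosureType the caller supplies): after a generation has called
  // save_marks(), newly allocated objects can be scanned with
  //
  //   ScanClosure cl(...);
  //   space->oop_since_save_marks_iterate(&cl);
  //
  // which also advances the saved mark past the objects just visited.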

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);

  // Very inefficient implementation.
  HeapWord* block_start_const(const void* p) const override;
  size_t block_size(const HeapWord* p) const override;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const override { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  void print_on(outputStream* st) const override;

  // Debugging
  void verify() const;
};

#if INCLUDE_SERIALGC

// Class TenuredSpace is used by TenuredGeneration; it supports an efficient
// "block_start" operation via a SerialBlockOffsetTable.

class TenuredSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  SerialBlockOffsetTable _offsets;

  // Mark sweep support
  size_t allowed_dead_ratio() const override;
 public:
  // Constructor
  TenuredSpace(SerialBlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr);

  HeapWord* block_start_const(const void* addr) const override;

  // Add offset table update.
  inline HeapWord* allocate(size_t word_size) override;
  inline HeapWord* par_allocate(size_t word_size) override;

  // MarkSweep support phase3
  void update_for_block(HeapWord* start, HeapWord* end) override;

  void print_on(outputStream* st) const override;
};
#endif //INCLUDE_SERIALGC

#endif // SHARE_GC_SHARED_SPACE_HPP