/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SPACE_HPP
#define SHARE_GC_SHARED_SPACE_HPP

#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "oops/markWord.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/serialBlockOffsetTable.hpp"
#endif

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Forward decls.
class Space;
class ContiguousSpace;
class Generation;
class CardTableRS;
class DirtyCardToOopClosure;
class FilteringClosure;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  Space():
    _bottom(nullptr), _end(nullptr) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  HeapWord* saved_mark_word() const  { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  bool obj_allocated_since_save_marks(const oop obj) const {
    return cast_from_oop<HeapWord*>(obj) >= saved_mark_word();
  }

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }
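
  // Illustrative sketch (not part of this interface): for a contiguous space
  // "sp", the saved mark splits the used region into an "old" prefix and the
  // objects allocated since save_marks. "sp" is a hypothetical name here.
  //
  //   sp->set_saved_mark_word(sp->top());                 // e.g. at a collection point
  //   // ... further allocations advance top() ...
  //   MemRegion all   = sp->used_region();                // [bottom, top)
  //   MemRegion older = sp->used_region_at_save_marks();  // [bottom, saved mark)
  //   assert(all.contains(older), "older objects form a prefix of the used region");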

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GCs.
  // We also call this when expanding a space to satisfy an allocation
  // request. See bug #4668531
  virtual void mangle_unused_area() = 0;
  virtual void mangle_unused_area_complete() = 0;

  // Testers
  bool is_empty() const              { return used() == 0; }

  // Returns true iff the space contains the given address as part
  // of an allocated object. For certain kinds of spaces, this might
  // be a potentially expensive operation. To prevent performance
  // problems on account of its inadvertent use in product JVMs,
  // we restrict its use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }
  bool is_in(oop obj) const {
    return is_in((void*)obj);
  }

  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ::is_aligned(p, sizeof(double));
  }

  // Size computations.  Sizes are in bytes.
  size_t capacity()     const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all objects in the space, calling "blk->do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return null.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  bool obj_is_alive(const HeapWord* addr) const;
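
  // Illustrative sketch (an assumption about typical use, not a guarantee of
  // this interface): the block accessors above are commonly combined to walk
  // a space block by block. "sp" and "do_something" are hypothetical names.
  //
  //   HeapWord* p = sp->bottom();
  //   HeapWord* limit = sp->used_region().end();
  //   while (p < limit) {
  //     if (sp->block_is_obj(p) && sp->obj_is_alive(p)) {
  //       do_something(cast_to_oop(p));
  //     }
  //     p += sp->block_size(p);
  //   }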

  // Allocation (return null if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return null if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
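
  // A minimal usage sketch (assumption: "sp" is a pointer to some concrete
  // subclass): allocate() requires the caller to have exclusive access (e.g.
  // it holds the heap lock or runs at a safepoint), whereas par_allocate()
  // may be called by several threads racing on the same space.
  //
  //   HeapWord* mem = sp->par_allocate(word_size);
  //   if (mem == nullptr) {
  //     // space is full: caller must fall back (other space, expansion, or GC)
  //   }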

#if INCLUDE_SERIALGC
  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers() = 0;
#endif

  void print() const;
  virtual void print_on(outputStream* st) const;
  void print_short() const;
  void print_short_on(outputStream* st) const;
};

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
public:
  Generation* gen;
  ContiguousSpace* space;

  CompactPoint(Generation* g = nullptr) :
    gen(g), space(nullptr) {}
};
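
// Illustrative sketch (an assumption about typical use, not a contract): a
// CompactPoint carries the destination state while the spaces of a generation
// are walked during the mark-compact "prepare" phase. "the_gen" and
// "first_space" are hypothetical names.
//
//   CompactPoint cp(the_gen);                    // destination generation
//   for (ContiguousSpace* s = first_space; s != nullptr;
//        s = s->next_compaction_space()) {
//     s->prepare_for_compaction(&cp);            // updates cp.space as needed
//   }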

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public Space {
  friend class VMStructs;

private:
  HeapWord* _compaction_top;
  ContiguousSpace* _next_compaction_space;

  static inline void verify_up_to_first_dead(ContiguousSpace* space) NOT_DEBUG_RETURN;

  static inline void clear_empty_region(ContiguousSpace* space);

 protected:
  HeapWord* _top;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // This is the function to invoke when an allocation of an object covering
  // "start" to "end" occurs, to update other internal data structures.
  virtual void update_for_block(HeapWord* start, HeapWord* the_end) { }

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return null if full).
  inline HeapWord* allocate_impl(size_t word_size);
  inline HeapWord* par_allocate_impl(size_t word_size);
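
  // Illustrative sketch of the bump-the-pointer allocation these helpers are
  // expected to perform (a sketch only; the actual definitions live in the
  // corresponding .inline.hpp file):
  //
  //   serial:   if (pointer_delta(end(), top()) >= word_size) {
  //               HeapWord* obj = top(); set_top(obj + word_size); return obj;
  //             }
  //             return nullptr;
  //   parallel: loop re-reading top(), and publish the new top with a
  //             compare-and-exchange (e.g. Atomic::cmpxchg) so that racing
  //             threads never receive overlapping memory.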

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose.  The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);
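
  // Illustrative usage sketch (assumes "mr" is the committed region backing
  // the space; using ZapUnusedHeapArea for the mangle argument is an
  // assumption about typical debug-build usage, not a requirement):
  //
  //   ContiguousSpace* sp = new ContiguousSpace();
  //   sp->initialize(mr, /*clear_space=*/ true, /*mangle_space=*/ ZapUnusedHeapArea);
  //   ...
  //   sp->clear(/*mangle_space=*/ ZapUnusedHeapArea);   // reuse the space as empty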

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top;    }

  void set_compaction_top(HeapWord* value) {
    assert(value == nullptr || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  It is also used to select the next
  // space into which to compact.

  virtual ContiguousSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(ContiguousSpace* csp) {
    _next_compaction_space = csp;
  }

#if INCLUDE_SERIALGC
  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->space" are the generation and space into
  // which we are currently compacting.  This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final "cp->space" up-to-date.
  // Offset tables may be updated in this phase as if the final copy had
  // occurred; if so, "cp->threshold" indicates when the next such action
  // should be taken.
  void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  void adjust_pointers() override;
  // MarkSweep support phase4
  virtual void compact();
#endif // INCLUDE_SERIALGC
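
  // Illustrative sketch of how the calls above line up with the serial
  // mark-sweep-compact phases (an assumption about the caller, not a contract
  // enforced here):
  //
  //   space->prepare_for_compaction(&cp);   // phase2: compute new addresses, forward
  //   space->adjust_pointers();             // phase3: fix references to moved objects
  //   space->compact();                     // phase4: copy objects to their new locations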

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support).
  virtual size_t allowed_dead_ratio() const { return 0; }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and the space containing "this"
  // (which must also equal "cp->space").  "compact_top" is where in "this"
  // the next object should be forwarded to.  If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward.  In either case, returns the new value of "compact_top".
  // Invokes the "update_for_block" function of the then-current compaction
  // space.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                    HeapWord* compact_top);
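
  // Illustrative sketch of the forwarding decision described above (a
  // simplification; the real logic also updates "cp" and invokes
  // update_for_block on the destination space):
  //
  //   if (compact_top + size > cp->space->end()) {
  //     // no room: advance cp to the next compaction space and retry there
  //   } else {
  //     q->forward_to(cast_to_oop(compact_top));   // install forwarding pointer
  //     compact_top += size;
  //   }
  //   return compact_top;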

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  void set_saved_mark()            { _saved_mark_word = top();    }

  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode, mangle (write a particular bit pattern over) the
  // unused part of a space.

  // Used to save the address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() override PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() override PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be null depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t used() const override   { return byte_size(bottom(), top()); }
  size_t free() const override   { return byte_size(top(),    end()); }

  bool is_free_block(const HeapWord* p) const override;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const override { return MemRegion(bottom(), top()); }

  // Allocation (return null if full)
  HeapWord* allocate(size_t word_size) override;
  HeapWord* par_allocate(size_t word_size) override;

  // Iteration
  void object_iterate(ObjectClosure* blk) override;

  // Compaction support
  void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
  }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* blk);

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);
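
  // Illustrative sketch (an assumption about typical use): scanning only the
  // objects allocated (e.g. promoted) since the last save_marks, with "sp"
  // a hypothetical space pointer and "cl" some oop closure:
  //
  //   sp->set_saved_mark();                    // remember the current top
  //   // ... objects are promoted/allocated into the space ...
  //   sp->oop_since_save_marks_iterate(&cl);   // visits only the new objects and
  //                                            // then advances the saved mark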

  // Very inefficient implementation.
  HeapWord* block_start_const(const void* p) const override;
  size_t block_size(const HeapWord* p) const override;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const override { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  void print_on(outputStream* st) const override;

  // Debugging
  void verify() const;
};

#if INCLUDE_SERIALGC

// Class TenuredSpace is used by TenuredGeneration; it supports an efficient
// "block_start" operation via a SerialBlockOffsetTable.

class TenuredSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  SerialBlockOffsetTable _offsets;

  // Mark sweep support
  size_t allowed_dead_ratio() const override;
 public:
  // Constructor
  TenuredSpace(SerialBlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr);

  HeapWord* block_start_const(const void* addr) const override;

  // Add offset table update.
  inline HeapWord* allocate(size_t word_size) override;
  inline HeapWord* par_allocate(size_t word_size) override;

  // MarkSweep support phase3
  void update_for_block(HeapWord* start, HeapWord* end) override;

  void print_on(outputStream* st) const override;
};
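
// Illustrative sketch (an assumption about the intent, not code copied from
// the implementation): the offset table lets block_start_const find the block
// covering an arbitrary address without scanning from bottom(). Roughly, the
// table yields a nearby earlier block start, from which we walk forward:
//
//   HeapWord* q = start_suggested_by_offset_table(addr);   // hypothetical helper
//   while (q + block_size(q) <= (HeapWord*)addr) {
//     q += block_size(q);          // at most a bounded number of steps
//   }
//   return q;                      // the block whose extent covers "addr"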
#endif //INCLUDE_SERIALGC

#endif // SHARE_GC_SHARED_SPACE_HPP