/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SATBMARKQUEUE_HPP
#define SHARE_GC_SHARED_SATBMARKQUEUE_HPP

#include "gc/shared/bufferNode.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomic.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/sizes.hpp"

class Thread;
class Monitor;
class SATBMarkQueueSet;
// Base class for processing the contents of a SATB buffer.
class SATBBufferClosure : public StackObj {
protected:
  ~SATBBufferClosure() { }

public:
  // Process the SATB entries in the designated buffer range.
  virtual void do_buffer(void** buffer, size_t size) = 0;
};
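
// Illustrative sketch of a concrete closure (MyMarker and mark_from_satb are
// hypothetical names; real collectors provide their own implementations):
//
//   class MarkSATBBufferClosure : public SATBBufferClosure {
//     MyMarker* _marker;
//   public:
//     MarkSATBBufferClosure(MyMarker* marker) : _marker(marker) {}
//     virtual void do_buffer(void** buffer, size_t size) {
//       for (size_t i = 0; i < size; ++i) {
//         _marker->mark_from_satb(cast_to_oop(buffer[i]));
//       }
//     }
//   };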

// A queue whose elements are (possibly stale) pointers to object heads.
class SATBMarkQueue {
  friend class VMStructs;
  friend class SATBMarkQueueSet;

private:
  NONCOPYABLE(SATBMarkQueue);

  // The buffer.
  void** _buf;

  // The byte index at which an object was last enqueued.  Starts at the
  // capacity in bytes (indicating an empty buffer) and decreases toward zero.
  // The value is always pointer-size aligned.
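  // For example, with a 1024-entry buffer on a 64-bit platform (illustrative
  // sizes only), the index starts at 8192 and drops to 8184 after one enqueue.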
  size_t _index;

  static const size_t _element_size = sizeof(void*);

  static size_t byte_index_to_index(size_t ind) {
    assert(is_aligned(ind, _element_size), "precondition");
    return ind / _element_size;
  }

  static size_t index_to_byte_index(size_t ind) {
    return ind * _element_size;
  }

  // Per-queue (so thread-local) cache of the SATBMarkQueueSet's
  // active state, to support inline barriers in compiled code.
  bool _active;

public:
  SATBMarkQueue(SATBMarkQueueSet* qset);

  // The queue must be flushed before destruction.
  ~SATBMarkQueue();

  void** buffer() const { return _buf; }

  void set_buffer(void** buffer) { _buf = buffer; }

  size_t index() const {
    return byte_index_to_index(_index);
  }

  void set_index(size_t new_index) {
    assert(new_index <= current_capacity(), "precondition");
    _index = index_to_byte_index(new_index);
  }

  // Returns the capacity of the buffer, or 0 if the queue doesn't currently
  // have a buffer.
  size_t current_capacity() const;

  bool is_empty() const { return index() == current_capacity(); }
  size_t size() const { return current_capacity() - index(); }

  bool is_active() const { return _active; }
  void set_active(bool value) { _active = value; }

#ifndef PRODUCT
  // Helpful for debugging
  void print(const char* name);
#endif // PRODUCT

  // Compiler support: field offsets and widths used when generating the
  // inline SATB write barrier in compiled code.
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(SATBMarkQueue, _index);
  }

  static constexpr ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(SATBMarkQueue, _buf);
  }

  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }

  static ByteSize byte_offset_of_active() {
    return byte_offset_of(SATBMarkQueue, _active);
  }

  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
};
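
// Illustrative sketch of the SATB pre-write barrier that feeds a queue
// (pseudocode only; the real barriers are emitted by the GC's BarrierSet
// and its compiler support):
//
//   if (queue.is_active()) {          // marking in progress
//     oop pre_val = *field;           // value about to be overwritten
//     if (pre_val != nullptr) {
//       qset.enqueue_known_active(queue, pre_val);
//     }
//   }
//   *field = new_val;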


// A SATBMarkQueueSet represents resources common to a set of SATBMarkQueues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// A completed buffer is a buffer the mutator is finished with, and
// is ready to be processed by the collector.  It need not be full.

class SATBMarkQueueSet {

  BufferNode::Allocator* _allocator;

  NONCOPYABLE(SATBMarkQueueSet);

  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_PADDING_SIZE, 0);
  PaddedEnd<BufferNode::Stack> _list;
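  // Bit 0 is the "process completed buffers" flag; the remaining bits hold
  // the number of completed buffers (see completed_buffers_num() and
  // process_completed_buffers() below).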
  Atomic<size_t> _count_and_process_flag;
  // These are rarely (if ever) changed, so same cache line as count.
  size_t _process_completed_buffers_threshold;
  size_t _buffer_enqueue_threshold;
  // SATB is only active during marking.  Enqueuing is only done when active.
  bool _all_active;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_PADDING_SIZE, 4 * sizeof(size_t));

  BufferNode* get_completed_buffer();
  void abandon_completed_buffers();

  // Discard any buffered enqueued data.
  void reset_queue(SATBMarkQueue& queue);

  // Add value to queue's buffer, returning true.  If buffer is full
  // or if queue doesn't have a buffer, does nothing and returns false.
  bool try_enqueue(SATBMarkQueue& queue, void* value);

  // Add value to queue's buffer.  The queue must have a non-full buffer.
  // Used after an initial try_enqueue has failed and the situation resolved.
  void retry_enqueue(SATBMarkQueue& queue, void* value);

  // Installs a new buffer into queue.
  // Returns the old buffer, or null if queue didn't have a buffer.
  BufferNode* exchange_buffer_with_new(SATBMarkQueue& queue);

  // Installs a new buffer into queue.
  void install_new_buffer(SATBMarkQueue& queue);

#ifdef ASSERT
  void dump_active_states(bool expected_active);
  void verify_active_states(bool expected_active);
#endif // ASSERT

protected:
  SATBMarkQueueSet(BufferNode::Allocator* allocator);

  ~SATBMarkQueueSet();

  void handle_zero_index(SATBMarkQueue& queue);

  // Return true if the queue's buffer should be enqueued, even if not full.
  // The default method uses the buffer enqueue threshold.
  bool should_enqueue_buffer(SATBMarkQueue& queue);

  template<typename Filter>
  void apply_filter(Filter filter, SATBMarkQueue& queue);

public:
  virtual SATBMarkQueue& satb_queue_for_thread(Thread* const t) const = 0;

  bool is_active() const { return _all_active; }

  // Apply "set_active(active)" to all SATB queues in the set. It should be
  // called only with the world stopped. The method asserts that the SATB
  // queues of all threads it visits, as well as the SATB queue set itself,
  // have an active state equal to expected_active.
  void set_active_all_threads(bool active, bool expected_active);

  void set_process_completed_buffers_threshold(size_t value);

  size_t buffer_enqueue_threshold() const { return _buffer_enqueue_threshold; }

  void set_buffer_enqueue_threshold_percentage(uint value);

  // If there exists some completed buffer, pop and process it, and
  // return true.  Otherwise return false.  Processing a buffer
  // consists of applying the closure to the active range of the
  // buffer; the leading entries may be excluded due to filtering.
  bool apply_closure_to_completed_buffer(SATBBufferClosure* cl);
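  //
  // Typical drain loop (illustrative only; MySATBBufferClosure is a
  // hypothetical SATBBufferClosure subclass):
  //
  //   MySATBBufferClosure cl;
  //   while (qset.apply_closure_to_completed_buffer(&cl)) {
  //     // keep draining until no completed buffers remain
  //   }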

  void flush_queue(SATBMarkQueue& queue);

  // Add obj to queue.  This qset and the queue must be active.
  void enqueue_known_active(SATBMarkQueue& queue, oop obj);
  virtual void filter(SATBMarkQueue& queue) = 0;
  void enqueue_completed_buffer(BufferNode* node);

  // The number of buffers in the list.  Racy and not updated atomically
  // with the set of completed buffers.
  size_t completed_buffers_num() const {
    return _count_and_process_flag.load_relaxed() >> 1;
  }

  // Return true if completed buffers should be processed.
  bool process_completed_buffers() const {
    return (_count_and_process_flag.load_relaxed() & 1) != 0;
  }

  // Return the associated BufferNode allocator.
  BufferNode::Allocator* allocator() const { return _allocator; }

  // Return the buffer for a BufferNode of size buffer_capacity().
  void** allocate_buffer();

  // Return an empty buffer to the free list.  The node is required
  // to have been allocated with a size of buffer_capacity().
  void deallocate_buffer(BufferNode* node);

  size_t buffer_capacity() const {
    return _allocator->buffer_capacity();
  }

#ifndef PRODUCT
  // Helpful for debugging
  void print_all(const char* msg);
#endif // PRODUCT

  // If a marking is being abandoned, reset any unprocessed log buffers.
  void abandon_partial_marking();
};

// Removes entries from queue's buffer that are no longer needed, as
// determined by filter_out. If e is a void* entry in queue's buffer,
// filter_out(e) must be a valid expression whose value is convertible
// to bool. Entries are removed (filtered out) if the result is true,
// retained if false.
template<typename Filter>
inline void SATBMarkQueueSet::apply_filter(Filter filter_out, SATBMarkQueue& queue) {
  void** buf = queue.buffer();

  if (buf == nullptr) {
    // Nothing to do, and avoid pointer arithmetic on nullptr below.
    return;
  }

  // Two-fingered compaction toward the end.
  void** src = buf + queue.index();
  void** dst = buf + queue.current_capacity();
  assert(src <= dst, "invariant");
  for ( ; src < dst; ++src) {
    // Search low to high for an entry to keep.
    void* entry = *src;
    if (entry != nullptr && !filter_out(entry)) {
      // Found keeper.  Search high to low for an entry to discard.
      while (src < --dst) {
        if (*dst == nullptr || filter_out(*dst)) {
          *dst = entry;         // Replace discard with keeper.
          break;
        }
      }
      // If discard search failed (src == dst), the outer loop will also end.
    }
  }
  // dst points to the lowest retained entry, or the end of the buffer
  // if all the entries were filtered out.
  queue.set_index(dst - buf);
}
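
// Worked example (illustrative, 6-entry buffer, index initially 2, so the
// live range is entries [2, 6); K = kept entry, D = filtered-out entry):
//
//   before: [ -, -, K1, D,  K2, D  ]   index == 2
//   after:  [ -, -, ?,  ?,  K2, K1 ]   index == 4
//
// Kept entries are compacted toward the end of the buffer; their relative
// order is not necessarily preserved.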

#endif // SHARE_GC_SHARED_SATBMARKQUEUE_HPP