/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP
#define SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP

#include "gc/shared/oopStorage.hpp"

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/isConst.hpp"
#include "oops/oop.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/count_trailing_zeros.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// Array of all active blocks.  Refcounted for lock-free reclaim of
// old array when a new array is allocated for expansion.
class OopStorage::ActiveArray {
  friend class OopStorage::TestAccess;

  size_t _size;
  volatile size_t _block_count;
  mutable volatile int _refcount;
  // Block* _blocks[1];            // Pseudo flexible array member.

  ActiveArray(size_t size);
  ~ActiveArray();

  NONCOPYABLE(ActiveArray);

  static size_t blocks_offset();
  Block* const* base_ptr() const;

  Block* const* block_ptr(size_t index) const;
  Block** block_ptr(size_t index);

public:
  static ActiveArray* create(size_t size,
                             MEMFLAGS memflags = mtGC,
                             AllocFailType alloc_fail = AllocFailStrategy::EXIT_OOM);
  static void destroy(ActiveArray* ba);

  inline Block* at(size_t i) const;

  size_t size() const;
  size_t block_count() const;
  size_t block_count_acquire() const;
  void increment_refcount() const;
  bool decrement_refcount() const; // Return true if zero, otherwise false.

  // Support for OopStorage::allocate.
  // Add block to the end of the array.  Updates block count at the
  // end of the operation, with a release_store.  Returns true if the
  // block was added, false if there was no room available.
  // precondition: owner's _allocation_mutex is locked, or at safepoint.
  bool push(Block* block);
  // Support for OopStorage::delete_empty_blocks.
  // Remove block from the array.
  // precondition: block must be present at its active_index element.
  void remove(Block* block);

  void copy_from(const ActiveArray* from);
};
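
// Usage sketch (illustrative only; the authoritative protocol lives in
// oopStorage.cpp).  A concurrent reader pins the array with the refcount
// while examining it:
//
//   ActiveArray* a = /* the storage's current _active_array */;
//   a->increment_refcount();                // pin against reclamation
//   for (size_t i = 0; i < a->block_count_acquire(); ++i) {
//     examine(a->at(i));                    // `examine` is a placeholder
//   }
//   bool last = a->decrement_refcount();    // true => refcount reached zero
//   if (last && superseded) {               // `superseded` stands for "a
//     ActiveArray::destroy(a);              // newer array has replaced a"
//   }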

inline size_t OopStorage::ActiveArray::blocks_offset() {
  return align_up(sizeof(ActiveArray), sizeof(Block*));
}

inline OopStorage::Block* const* OopStorage::ActiveArray::base_ptr() const {
  const void* ptr = reinterpret_cast<const char*>(this) + blocks_offset();
  return reinterpret_cast<Block* const*>(ptr);
}

inline OopStorage::Block* const* OopStorage::ActiveArray::block_ptr(size_t index) const {
  return base_ptr() + index;
}

inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
  return const_cast<Block**>(base_ptr() + index);
}

inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
  assert(index < _block_count, "precondition");
  return *block_ptr(index);
}
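
// Memory layout sketch: an ActiveArray is allocated as one chunk, with the
// Block* slots (the pseudo flexible array member commented out above)
// immediately following the fixed-size header:
//
//   +--------------------+------------+------------+-----
//   | ActiveArray fields | _blocks[0] | _blocks[1] | ...
//   +--------------------+------------+------------+-----
//   ^ this               ^ this + blocks_offset() == base_ptr()
//
// blocks_offset() rounds the header size up to sizeof(Block*), so the slot
// array is suitably aligned for pointer loads and stores.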

// A Block has an embedded AllocationListEntry to provide the links between
// Blocks in an AllocationList.
class OopStorage::AllocationListEntry {
  friend class OopStorage::AllocationList;

  // Members are mutable, and we deal exclusively with pointers to
  // const, to make const blocks easier to use; a block being const
  // doesn't prevent modifying its list state.
  mutable const Block* _prev;
  mutable const Block* _next;

  NONCOPYABLE(AllocationListEntry);

public:
  AllocationListEntry();
  ~AllocationListEntry();
};

// Fixed-sized array of oops, plus bookkeeping data.
// All blocks are in the storage's _active_array, at the block's _active_index.
// Non-full blocks are in the storage's _allocation_list, linked through the
// block's _allocation_list_entry.  Empty blocks are at the end of that list.
class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
  // _data must be the first non-static data member, for alignment.
  oop _data[BitsPerWord];
  static const unsigned _data_pos = 0; // Position of _data.

  volatile uintx _allocated_bitmask; // One bit per _data element.
  intptr_t _owner_address;
  void* _memory;              // Unaligned storage containing block.
  size_t _active_index;
  AllocationListEntry _allocation_list_entry;
  Block* volatile _deferred_updates_next;
  volatile uintx _release_refcount;

  Block(const OopStorage* owner, void* memory);
  ~Block();

  void check_index(unsigned index) const;
  unsigned get_index(const oop* ptr) const;
  void atomic_add_allocated(uintx add);

  template<typename F, typename BlockPtr>
  static bool iterate_impl(F f, BlockPtr b);

  NONCOPYABLE(Block);

public:
  const AllocationListEntry& allocation_list_entry() const;

  static size_t allocation_size();
  static size_t allocation_alignment_shift();

  oop* get_pointer(unsigned index);
  const oop* get_pointer(unsigned index) const;

  uintx bitmask_for_index(unsigned index) const;
  uintx bitmask_for_entry(const oop* ptr) const;

  // Allocation bitmask accessors are racy.
  bool is_full() const;
  bool is_empty() const;
  uintx allocated_bitmask() const;

  bool is_safe_to_delete() const;

  Block* deferred_updates_next() const;
  void set_deferred_updates_next(Block* new_next);

  bool contains(const oop* ptr) const;

  size_t active_index() const;
  void set_active_index(size_t index);
  static size_t active_index_safe(const Block* block); // Returns 0 if access fails.

  // Returns NULL if ptr is not in a block or not allocated in that block.
  static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);

  oop* allocate();
  uintx allocate_all();
  static Block* new_block(const OopStorage* owner);
  static void delete_block(const Block& block);

  void release_entries(uintx releasing, OopStorage* owner);

  template<typename F> bool iterate(F f);
  template<typename F> bool iterate(F f) const;
}; // class Block

inline OopStorage::Block* OopStorage::AllocationList::head() {
  return const_cast<Block*>(_head);
}

inline OopStorage::Block* OopStorage::AllocationList::tail() {
  return const_cast<Block*>(_tail);
}

inline const OopStorage::Block* OopStorage::AllocationList::chead() const {
  return _head;
}

inline const OopStorage::Block* OopStorage::AllocationList::ctail() const {
  return _tail;
}

inline OopStorage::Block* OopStorage::AllocationList::prev(Block& block) {
  return const_cast<Block*>(block.allocation_list_entry()._prev);
}

inline OopStorage::Block* OopStorage::AllocationList::next(Block& block) {
  return const_cast<Block*>(block.allocation_list_entry()._next);
}

inline const OopStorage::Block* OopStorage::AllocationList::prev(const Block& block) const {
  return block.allocation_list_entry()._prev;
}

inline const OopStorage::Block* OopStorage::AllocationList::next(const Block& block) const {
  return block.allocation_list_entry()._next;
}
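
// Traversal sketch (illustrative): the list is intrusive -- each Block
// embeds its own links -- so walking it needs no auxiliary nodes.  This
// assumes the usual convention that head() is NULL for an empty list:
//
//   for (Block* b = list.head(); b != NULL; b = list.next(*b)) {
//     // visit b
//   }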

template<typename Closure>
class OopStorage::OopFn {
public:
  explicit OopFn(Closure* cl) : _cl(cl) {}

  template<typename OopPtr>     // [const] oop*
  bool operator()(OopPtr ptr) const {
    _cl->do_oop(ptr);
    return true;
  }

private:
  Closure* _cl;
};

template<typename Closure>
inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
  return OopFn<Closure>(cl);
}

template<typename IsAlive, typename F>
class OopStorage::IfAliveFn {
public:
  IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {}

  bool operator()(oop* ptr) const {
    bool result = true;
    oop v = *ptr;
    if (v != NULL) {
      if (_is_alive->do_object_b(v)) {
        result = _f(ptr);
      } else {
        ObjectMonitor::maybe_deflate_dead(ptr);
        *ptr = NULL;            // Clear dead value.
      }
    }
    return result;
  }

private:
  IsAlive* _is_alive;
  F _f;
};

template<typename IsAlive, typename F>
inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) {
  return IfAliveFn<IsAlive, F>(is_alive, f);
}

template<typename F>
class OopStorage::SkipNullFn {
public:
  SkipNullFn(F f) : _f(f) {}

  template<typename OopPtr>     // [const] oop*
  bool operator()(OopPtr ptr) const {
    return (*ptr != NULL) ? _f(ptr) : true;
  }

private:
  F _f;
};

template<typename F>
inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
  return SkipNullFn<F>(f);
}
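
// These adapters compose.  weak_oops_do (below) wraps a closure as
// skip_null_fn(oop_fn(cl)), so NULL entries are skipped and each non-NULL
// entry's address is passed to cl->do_oop; if_alive_fn(is_alive, oop_fn(cl))
// instead applies cl only to entries whose referent is still alive, and
// clears the dead ones.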

// Inline Block accesses for use in iteration loops.

inline const OopStorage::AllocationListEntry& OopStorage::Block::allocation_list_entry() const {
  return _allocation_list_entry;
}

inline void OopStorage::Block::check_index(unsigned index) const {
  assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
}

inline oop* OopStorage::Block::get_pointer(unsigned index) {
  check_index(index);
  return &_data[index];
}

inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
  check_index(index);
  return &_data[index];
}

inline uintx OopStorage::Block::allocated_bitmask() const {
  return _allocated_bitmask;
}

inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
  check_index(index);
  return uintx(1) << index;
}
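
// For example, bitmask_for_index(3) is uintx(1) << 3 == 0x8: bit i of the
// allocated bitmask corresponds to entry _data[i].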

// Provide const or non-const iteration, depending on whether BlockPtr
// is const Block* or Block*, respectively.
template<typename F, typename BlockPtr> // BlockPtr := [const] Block*
inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) {
  uintx bitmask = block->allocated_bitmask();
  while (bitmask != 0) {
    unsigned index = count_trailing_zeros(bitmask);
    bitmask ^= block->bitmask_for_index(index);
    if (!f(block->get_pointer(index))) {
      return false;
    }
  }
  return true;
}
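
// Worked example: if allocated_bitmask() returns 0b10010, the loop visits
// index 1 first (count_trailing_zeros), clears that bit leaving 0b10000,
// then visits index 4.  Iteration stops early as soon as f returns false.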

template<typename F>
inline bool OopStorage::Block::iterate(F f) {
  return iterate_impl(f, this);
}

template<typename F>
inline bool OopStorage::Block::iterate(F f) const {
  return iterate_impl(f, this);
}

//////////////////////////////////////////////////////////////////////////////
// Support for serial iteration, always at a safepoint.

// Provide const or non-const iteration, depending on whether Storage is
// const OopStorage* or OopStorage*, respectively.
template<typename F, typename Storage> // Storage := [const] OopStorage
inline bool OopStorage::iterate_impl(F f, Storage* storage) {
  assert_at_safepoint();
  // Propagate const/non-const iteration to the block layer, by using
  // const or non-const blocks as corresponding to Storage.
  typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr;
  ActiveArray* blocks = storage->_active_array;
  size_t limit = blocks->block_count();
  for (size_t i = 0; i < limit; ++i) {
    BlockPtr block = blocks->at(i);
    if (!block->iterate(f)) {
      return false;
    }
  }
  return true;
}

template<typename F>
inline bool OopStorage::iterate_safepoint(F f) {
  return iterate_impl(f, this);
}

template<typename F>
inline bool OopStorage::iterate_safepoint(F f) const {
  return iterate_impl(f, this);
}

template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) {
  iterate_safepoint(oop_fn(cl));
}

template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) const {
  iterate_safepoint(oop_fn(cl));
}

template<typename Closure>
inline void OopStorage::weak_oops_do(Closure* cl) {
  iterate_safepoint(skip_null_fn(oop_fn(cl)));
}

template<typename IsAliveClosure, typename Closure>
inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
  iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl)));
}
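
// Usage sketch (illustrative): at a safepoint, apply a closure to every
// allocated entry.  Any type with a suitable do_oop member works;
// MyOopClosure below is hypothetical:
//
//   MyOopClosure cl;             // provides do_oop(oop*)
//   storage->oops_do(&cl);       // visits every allocated slot
//   storage->weak_oops_do(&cl);  // likewise, but skips NULL entries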

#endif // SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP