/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP
#define SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP

#include "gc/shared/oopStorage.hpp"

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/isConst.hpp"
#include "oops/oop.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/count_trailing_zeros.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// Array of all active blocks. Refcounted for lock-free reclaim of
// old array when a new array is allocated for expansion.
class OopStorage::ActiveArray {
  friend class OopStorage::TestAccess;

  size_t _size;
  volatile size_t _block_count;
  mutable volatile int _refcount;
  // Block* _blocks[1]; // Pseudo flexible array member.

  ActiveArray(size_t size);
  ~ActiveArray();

  NONCOPYABLE(ActiveArray);

  static size_t blocks_offset();
  Block* const* base_ptr() const;

  Block* const* block_ptr(size_t index) const;
  Block** block_ptr(size_t index);

public:
  static ActiveArray* create(size_t size,
                             MEMFLAGS memflags = mtGC,
                             AllocFailType alloc_fail = AllocFailStrategy::EXIT_OOM);
  static void destroy(ActiveArray* ba);

  inline Block* at(size_t i) const;

  size_t size() const;
  size_t block_count() const;
  size_t block_count_acquire() const;
  void increment_refcount() const;
  bool decrement_refcount() const; // Return true if zero, otherwise false

  // Support for OopStorage::allocate.
  // Add block to the end of the array. Updates block count at the
  // end of the operation, with a release_store. Returns true if the
  // block was added, false if there was no room available.
  // precondition: owner's _allocation_mutex is locked, or at safepoint.
  bool push(Block* block);

  // Support OopStorage::delete_empty_blocks_xxx operations.
  // Remove block from the array.
  // precondition: block must be present at its active_index element.
  void remove(Block* block);

  void copy_from(const ActiveArray* from);
};
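
// Editor's note: a sketch (not the actual OopStorage implementation, which
// lives in oopStorage.cpp) of the reader protocol the refcount supports.
// A reader pins the array before walking it, so a concurrent expansion can
// reclaim a replaced array only after its last reader has finished:
//
//   array->increment_refcount();                 // Pin array while reading.
//   size_t count = array->block_count_acquire(); // Pairs with push's release_store.
//   for (size_t i = 0; i < count; ++i) {
//     process(array->at(i));                     // process() is hypothetical.
//   }
//   if (array->decrement_refcount()) {           // True: last reference dropped.
//     ActiveArray::destroy(array);               // Safe to reclaim retired array.
//   }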

inline size_t OopStorage::ActiveArray::blocks_offset() {
  return align_up(sizeof(ActiveArray), sizeof(Block*));
}

inline OopStorage::Block* const* OopStorage::ActiveArray::base_ptr() const {
  const void* ptr = reinterpret_cast<const char*>(this) + blocks_offset();
  return reinterpret_cast<Block* const*>(ptr);
}

inline OopStorage::Block* const* OopStorage::ActiveArray::block_ptr(size_t index) const {
  return base_ptr() + index;
}

inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
  return const_cast<Block**>(base_ptr() + index);
}

inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
  assert(index < _block_count, "precondition");
  return *block_ptr(index);
}

// A Block has an embedded AllocationListEntry to provide the links between
// Blocks in an AllocationList.
class OopStorage::AllocationListEntry {
  friend class OopStorage::AllocationList;

  // Members are mutable, and we deal exclusively with pointers to
  // const, to make const blocks easier to use; a block being const
  // doesn't prevent modifying its list state.
  mutable const Block* _prev;
  mutable const Block* _next;

  NONCOPYABLE(AllocationListEntry);

public:
  AllocationListEntry();
  ~AllocationListEntry();
};

// Fixed-sized array of oops, plus bookkeeping data.
// All blocks are in the storage's _active_array, at the block's _active_index.
// Non-full blocks are in the storage's _allocation_list, linked through the
// block's _allocation_list_entry. Empty blocks are at the end of that list.
class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
  // _data must be the first non-static data member, for alignment.
  oop _data[BitsPerWord];
  static const unsigned _data_pos = 0; // Position of _data.

  volatile uintx _allocated_bitmask; // One bit per _data element.
  intptr_t _owner_address;
  void* _memory; // Unaligned storage containing block.
  size_t _active_index;
  AllocationListEntry _allocation_list_entry;
  Block* volatile _deferred_updates_next;
  volatile uintx _release_refcount;

  Block(const OopStorage* owner, void* memory);
  ~Block();

  void check_index(unsigned index) const;
  unsigned get_index(const oop* ptr) const;
  void atomic_add_allocated(uintx add);

  template<typename F, typename BlockPtr>
  static bool iterate_impl(F f, BlockPtr b);

  NONCOPYABLE(Block);

public:
  const AllocationListEntry& allocation_list_entry() const;

  static size_t allocation_size();
  static size_t allocation_alignment_shift();

  oop* get_pointer(unsigned index);
  const oop* get_pointer(unsigned index) const;

  uintx bitmask_for_index(unsigned index) const;
  uintx bitmask_for_entry(const oop* ptr) const;

  // Allocation bitmask accessors are racy.
  bool is_full() const;
  bool is_empty() const;
  uintx allocated_bitmask() const;

  bool is_safe_to_delete() const;

  Block* deferred_updates_next() const;
  void set_deferred_updates_next(Block* new_next);

  bool contains(const oop* ptr) const;

  size_t active_index() const;
  void set_active_index(size_t index);
  static size_t active_index_safe(const Block* block); // Returns 0 if access fails.

  // Returns NULL if ptr is not in a block or not allocated in that block.
  static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);

  oop* allocate();
  uintx allocate_all();
  static Block* new_block(const OopStorage* owner);
  static void delete_block(const Block& block);

  void release_entries(uintx releasing, OopStorage* owner);

  template<typename F> bool iterate(F f);
  template<typename F> bool iterate(F f) const;
}; // class Block
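
// Editor's note: a sketch of the allocation bitmask arithmetic; the actual
// Block::allocate() is defined in oopStorage.cpp. Bit i of _allocated_bitmask
// guards _data[i], so claiming the lowest free slot amounts to the following
// (an atomic bitmask update is used because release_entries may clear bits
// concurrently with an allocation):
//
//   uintx free = ~allocated_bitmask();           // Set bits mark free slots.
//   assert(free != 0, "attempt to allocate from full block");
//   unsigned index = count_trailing_zeros(free); // Lowest free slot.
//   atomic_add_allocated(bitmask_for_index(index));
//   oop* result = get_pointer(index);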

inline OopStorage::Block* OopStorage::AllocationList::head() {
  return const_cast<Block*>(_head);
}

inline OopStorage::Block* OopStorage::AllocationList::tail() {
  return const_cast<Block*>(_tail);
}

inline const OopStorage::Block* OopStorage::AllocationList::chead() const {
  return _head;
}

inline const OopStorage::Block* OopStorage::AllocationList::ctail() const {
  return _tail;
}

inline OopStorage::Block* OopStorage::AllocationList::prev(Block& block) {
  return const_cast<Block*>(block.allocation_list_entry()._prev);
}

inline OopStorage::Block* OopStorage::AllocationList::next(Block& block) {
  return const_cast<Block*>(block.allocation_list_entry()._next);
}

inline const OopStorage::Block* OopStorage::AllocationList::prev(const Block& block) const {
  return block.allocation_list_entry()._prev;
}

inline const OopStorage::Block* OopStorage::AllocationList::next(const Block& block) const {
  return block.allocation_list_entry()._next;
}

template<typename Closure>
class OopStorage::OopFn {
public:
  explicit OopFn(Closure* cl) : _cl(cl) {}

  template<typename OopPtr> // [const] oop*
  bool operator()(OopPtr ptr) const {
    _cl->do_oop(ptr);
    return true;
  }

private:
  Closure* _cl;
};

template<typename Closure>
inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
  return OopFn<Closure>(cl);
}

template<typename IsAlive, typename F>
class OopStorage::IfAliveFn {
public:
  IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {}

  bool operator()(oop* ptr) const {
    bool result = true;
    oop v = *ptr;
    if (v != NULL) {
      if (_is_alive->do_object_b(v)) {
        result = _f(ptr);
      } else {
        *ptr = NULL; // Clear dead value.
      }
    }
    return result;
  }

private:
  IsAlive* _is_alive;
  F _f;
};

template<typename IsAlive, typename F>
inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) {
  return IfAliveFn<IsAlive, F>(is_alive, f);
}

template<typename F>
class OopStorage::SkipNullFn {
public:
  SkipNullFn(F f) : _f(f) {}

  template<typename OopPtr> // [const] oop*
  bool operator()(OopPtr ptr) const {
    return (*ptr != NULL) ? _f(ptr) : true;
  }

private:
  F _f;
};

template<typename F>
inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
  return SkipNullFn<F>(f);
}

// Inline Block accesses for use in iteration loops.

inline const OopStorage::AllocationListEntry& OopStorage::Block::allocation_list_entry() const {
  return _allocation_list_entry;
}

inline void OopStorage::Block::check_index(unsigned index) const {
  assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
}

inline oop* OopStorage::Block::get_pointer(unsigned index) {
  check_index(index);
  return &_data[index];
}

inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
  check_index(index);
  return &_data[index];
}

inline uintx OopStorage::Block::allocated_bitmask() const {
  return _allocated_bitmask;
}

inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
  check_index(index);
  return uintx(1) << index;
}

// Provide const or non-const iteration, depending on whether BlockPtr
// is const Block* or Block*, respectively.
template<typename F, typename BlockPtr> // BlockPtr := [const] Block*
inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) {
  uintx bitmask = block->allocated_bitmask();
  while (bitmask != 0) {
    unsigned index = count_trailing_zeros(bitmask);
    bitmask ^= block->bitmask_for_index(index);
    if (!f(block->get_pointer(index))) {
      return false;
    }
  }
  return true;
}

template<typename F>
inline bool OopStorage::Block::iterate(F f) {
  return iterate_impl(f, this);
}

template<typename F>
inline bool OopStorage::Block::iterate(F f) const {
  return iterate_impl(f, this);
}

//////////////////////////////////////////////////////////////////////////////
// Support for serial iteration, always at a safepoint.

// Provide const or non-const iteration, depending on whether Storage is
// const OopStorage* or OopStorage*, respectively.
template<typename F, typename Storage> // Storage := [const] OopStorage
inline bool OopStorage::iterate_impl(F f, Storage* storage) {
  assert_at_safepoint();
  // Propagate const/non-const iteration to the block layer, by using
  // const or non-const blocks as corresponding to Storage.
  typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr;
  ActiveArray* blocks = storage->_active_array;
  size_t limit = blocks->block_count();
  for (size_t i = 0; i < limit; ++i) {
    BlockPtr block = blocks->at(i);
    if (!block->iterate(f)) {
      return false;
    }
  }
  return true;
}

template<typename F>
inline bool OopStorage::iterate_safepoint(F f) {
  return iterate_impl(f, this);
}

template<typename F>
inline bool OopStorage::iterate_safepoint(F f) const {
  return iterate_impl(f, this);
}

template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) {
  iterate_safepoint(oop_fn(cl));
}

template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) const {
  iterate_safepoint(oop_fn(cl));
}

template<typename Closure>
inline void OopStorage::weak_oops_do(Closure* cl) {
  iterate_safepoint(skip_null_fn(oop_fn(cl)));
}

template<typename IsAliveClosure, typename Closure>
inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
  iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl)));
}
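
// Editor's note: a usage sketch for the safepoint iteration entry points
// above; MyOopClosure and is_alive are hypothetical. Any closure type
// providing do_oop(oop*) works, because oop_fn/skip_null_fn/if_alive_fn
// adapt it to the per-slot functor protocol of iterate_safepoint. All of
// these must be called at a safepoint (asserted in iterate_impl).
//
//   class MyOopClosure : public OopClosure {
//   public:
//     virtual void do_oop(oop* p)       { /* visit allocated slot */ }
//     virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
//   };
//
//   MyOopClosure cl;
//   storage->oops_do(&cl);                  // Visits every allocated slot.
//   storage->weak_oops_do(&cl);             // Also skips slots holding NULL.
//   storage->weak_oops_do(&is_alive, &cl);  // Clears slots whose value is dead.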

#endif // SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP