
src/hotspot/share/gc/shared/oopStorage.cpp


*** 33,43 ****
  #include "runtime/handles.inline.hpp"
  #include "runtime/interfaceSupport.inline.hpp"
  #include "runtime/mutex.hpp"
  #include "runtime/mutexLocker.hpp"
  #include "runtime/orderAccess.hpp"
- #include "runtime/os.hpp"
  #include "runtime/safepoint.hpp"
  #include "runtime/stubRoutines.hpp"
  #include "runtime/thread.hpp"
  #include "utilities/align.hpp"
  #include "utilities/count_trailing_zeros.hpp"
--- 33,42 ----
*** 206,216 ****
  const unsigned block_alignment = sizeof(oop) * section_size;

  OopStorage::Block::Block(const OopStorage* owner, void* memory) :
    _data(),
    _allocated_bitmask(0),
!   _owner_address(reinterpret_cast<intptr_t>(owner)),
    _memory(memory),
    _active_index(0),
    _allocation_list_entry(),
    _deferred_updates_next(NULL),
    _release_refcount(0)
--- 205,215 ----
  const unsigned block_alignment = sizeof(oop) * section_size;

  OopStorage::Block::Block(const OopStorage* owner, void* memory) :
    _data(),
    _allocated_bitmask(0),
!   _owner(owner),
    _memory(memory),
    _active_index(0),
    _allocation_list_entry(),
    _deferred_updates_next(NULL),
    _release_refcount(0)
*** 226,236 ****
    assert(_release_refcount == 0, "deleting block while releasing");
    assert(_deferred_updates_next == NULL, "deleting block with deferred update");
    // Clear fields used by block_for_ptr and entry validation, which
    // might help catch bugs.  Volatile to prevent dead-store elimination.
    const_cast<uintx volatile&>(_allocated_bitmask) = 0;
!   const_cast<intptr_t volatile&>(_owner_address) = 0;
  }

  size_t OopStorage::Block::allocation_size() {
    // _data must be first member, so aligning Block aligns _data.
    STATIC_ASSERT(_data_pos == 0);
--- 225,235 ----
    assert(_release_refcount == 0, "deleting block while releasing");
    assert(_deferred_updates_next == NULL, "deleting block with deferred update");
    // Clear fields used by block_for_ptr and entry validation, which
    // might help catch bugs.  Volatile to prevent dead-store elimination.
    const_cast<uintx volatile&>(_allocated_bitmask) = 0;
!   const_cast<OopStorage* volatile&>(_owner) = NULL;
  }

  size_t OopStorage::Block::allocation_size() {
    // _data must be first member, so aligning Block aligns _data.
    STATIC_ASSERT(_data_pos == 0);
*** 354,364 ****
    // start position, the value at the owner position will be some oop
    // (possibly NULL), which can never match the owner.
    intptr_t owner_addr = reinterpret_cast<intptr_t>(owner);
    for (unsigned i = 0; i < section_count; ++i, section += section_size) {
      Block* candidate = reinterpret_cast<Block*>(section);
!     if (SafeFetchN(&candidate->_owner_address, 0) == owner_addr) {
        return candidate;
      }
    }
    return NULL;
  }
--- 353,365 ----
    // start position, the value at the owner position will be some oop
    // (possibly NULL), which can never match the owner.
    intptr_t owner_addr = reinterpret_cast<intptr_t>(owner);
    for (unsigned i = 0; i < section_count; ++i, section += section_size) {
      Block* candidate = reinterpret_cast<Block*>(section);
!     intptr_t* candidate_owner_addr
!       = reinterpret_cast<intptr_t*>(&candidate->_owner);
!     if (SafeFetchN(candidate_owner_addr, 0) == owner_addr) {
        return candidate;
      }
    }
    return NULL;
  }
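For context: block_for_ptr() recovers a Block from an arbitrary entry address by probing
aligned candidate positions for the owner word, with SafeFetchN guarding against a probe
of a non-block address.  The following standalone sketch (not part of this change)
illustrates the basic idea; the names are hypothetical, the section geometry is
simplified to whole-block alignment, and the defensive SafeFetchN probe is omitted.

    #include <cstdint>
    #include <cstddef>

    struct DemoBlock {
      const void* _owner;        // word probed to validate a candidate block
      void*       _entries[62];  // payload; entry pointers land in here
    };

    const size_t demo_alignment = 512;  // assumed allocation alignment of blocks

    // Map an interior entry pointer back to its (aligned) enclosing block by
    // aligning down and checking the owner word.  Returns NULL on mismatch.
    DemoBlock* demo_block_for_ptr(const void* owner, const void* entry) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(entry);
      uintptr_t start = addr & ~(uintptr_t(demo_alignment) - 1);
      DemoBlock* candidate = reinterpret_cast<DemoBlock*>(start);
      return (candidate->_owner == owner) ? candidate : NULL;
    }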
*** 411,435 ****
  // is empty, for ease of empty block deletion processing.

  oop* OopStorage::allocate() {
    MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
    Block* block = block_for_allocation();
    if (block == NULL) return NULL; // Block allocation failed.
    assert(!block->is_full(), "invariant");
    if (block->is_empty()) {
      // Transitioning from empty to not empty.
!     log_trace(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
    }
    oop* result = block->allocate();
    assert(result != NULL, "allocation failed");
    assert(!block->is_empty(), "postcondition");
    Atomic::inc(&_allocation_count); // release updates outside lock.
    if (block->is_full()) {
      // Transitioning from not full to full.
      // Remove full blocks from consideration by future allocates.
!     log_trace(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
      _allocation_list.unlink(*block);
    }
    log_trace(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
    return result;
  }
--- 412,444 ----
  // is empty, for ease of empty block deletion processing.

  oop* OopStorage::allocate() {
    MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
+   // Note: Without this we might never perform cleanup.  As it is,
+   // cleanup is only requested here, when completing a concurrent
+   // iteration, or when someone entirely else wakes up the service
+   // thread, which isn't ideal.  But we can't notify in release().
+   if (reduce_deferred_updates()) {
+     notify_needs_cleanup();
+   }
+ 
    Block* block = block_for_allocation();
    if (block == NULL) return NULL; // Block allocation failed.
    assert(!block->is_full(), "invariant");
    if (block->is_empty()) {
      // Transitioning from empty to not empty.
!     log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
    }
    oop* result = block->allocate();
    assert(result != NULL, "allocation failed");
    assert(!block->is_empty(), "postcondition");
    Atomic::inc(&_allocation_count); // release updates outside lock.
    if (block->is_full()) {
      // Transitioning from not full to full.
      // Remove full blocks from consideration by future allocates.
!     log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
      _allocation_list.unlink(*block);
    }
    log_trace(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
    return result;
  }
*** 463,489 ****
    return true;
  }

  OopStorage::Block* OopStorage::block_for_allocation() {
    assert_lock_strong(_allocation_mutex);
    while (true) {
      // Use the first block in _allocation_list for the allocation.
      Block* block = _allocation_list.head();
      if (block != NULL) {
        return block;
      } else if (reduce_deferred_updates()) {
!       // Might have added a block to the _allocation_list, so retry.
      } else if (try_add_block()) {
!       // Successfully added a new block to the list, so retry.
!       assert(_allocation_list.chead() != NULL, "invariant");
!     } else if (_allocation_list.chead() != NULL) {
!       // Trying to add a block failed, but some other thread added to the
!       // list while we'd dropped the lock over the new block allocation.
!     } else if (!reduce_deferred_updates()) { // Once more before failure.
        // Attempt to add a block failed, no other thread added a block,
        // and no deferred updated added a block, then allocation failed.
!       log_info(oopstorage, blocks)("%s: failed block allocation", name());
        return NULL;
      }
    }
  }
--- 472,501 ----
    return true;
  }

  OopStorage::Block* OopStorage::block_for_allocation() {
    assert_lock_strong(_allocation_mutex);
+ 
    while (true) {
      // Use the first block in _allocation_list for the allocation.
      Block* block = _allocation_list.head();
      if (block != NULL) {
        return block;
      } else if (reduce_deferred_updates()) {
!       MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
!       notify_needs_cleanup();
      } else if (try_add_block()) {
!       block = _allocation_list.head();
!       assert(block != NULL, "invariant");
!       return block;
!     } else if (reduce_deferred_updates()) { // Once more before failure.
!       MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
!       notify_needs_cleanup();
!     } else {
        // Attempt to add a block failed, no other thread added a block,
        // and no deferred updated added a block, then allocation failed.
!       log_debug(oopstorage, blocks)("%s: failed block allocation", name());
        return NULL;
      }
    }
  }
*** 571,589 ****
  static void log_release_transitions(uintx releasing,
                                      uintx old_allocated,
                                      const OopStorage* owner,
                                      const void* block) {
!   LogTarget(Trace, oopstorage, blocks) lt;
!   if (lt.is_enabled()) {
!     LogStream ls(lt);
!     if (is_full_bitmask(old_allocated)) {
!       ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
!     }
!     if (releasing == old_allocated) {
!       ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
!     }
    }
  }

  void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
    assert(releasing != 0, "preconditon");
--- 583,599 ----
  static void log_release_transitions(uintx releasing,
                                      uintx old_allocated,
                                      const OopStorage* owner,
                                      const void* block) {
!   Log(oopstorage, blocks) log;
!   LogStream ls(log.debug());
!   if (is_full_bitmask(old_allocated)) {
!     ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
!   }
!   if (releasing == old_allocated) {
!     ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
    }
  }

  void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
    assert(releasing != 0, "preconditon");
*** 606,616 ****
    // reduce_deferred_updates will make any needed changes related to this
    // block and _allocation_list.  This deferral avoids _allocation_list
    // updates and the associated locking here.
    if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
      // Log transitions.  Both transitions are possible in a single update.
!     log_release_transitions(releasing, old_allocated, owner, this);
      // Attempt to claim responsibility for adding this block to the deferred
      // list, by setting the link to non-NULL by self-looping.  If this fails,
      // then someone else has made such a claim and the deferred update has not
      // yet been processed and will include our change, so we don't need to do
      // anything further.
--- 616,628 ----
    // reduce_deferred_updates will make any needed changes related to this
    // block and _allocation_list.  This deferral avoids _allocation_list
    // updates and the associated locking here.
    if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
      // Log transitions.  Both transitions are possible in a single update.
!     if (log_is_enabled(Debug, oopstorage, blocks)) {
!       log_release_transitions(releasing, old_allocated, _owner, this);
!     }
      // Attempt to claim responsibility for adding this block to the deferred
      // list, by setting the link to non-NULL by self-looping.  If this fails,
      // then someone else has made such a claim and the deferred update has not
      // yet been processed and will include our change, so we don't need to do
      // anything further.
*** 621,640 ****
          _deferred_updates_next = (head == NULL) ? this : head;
          Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
          if (fetched == head) break; // Successful update.
          head = fetched;             // Retry with updated head.
        }
!       // Only request cleanup for to-empty transitions, not for from-full.
!       // There isn't any rush to process from-full transitions.  Allocation
!       // will reduce deferrals before allocating new blocks, so may process
!       // some.  And the service thread will drain the entire deferred list
!       // if there are any pending to-empty transitions.
!       if (releasing == old_allocated) {
!         owner->record_needs_cleanup();
!       }
!       log_trace(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
!                                     owner->name(), p2i(this));
      }
    }
    // Release hold on empty block deletion.
    Atomic::dec(&_release_refcount);
  }
--- 633,645 ----
          _deferred_updates_next = (head == NULL) ? this : head;
          Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
          if (fetched == head) break; // Successful update.
          head = fetched;             // Retry with updated head.
        }
!       owner->record_needs_cleanup();
!       log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
!                                     _owner->name(), p2i(this));
      }
    }
    // Release hold on empty block deletion.
    Atomic::dec(&_release_refcount);
  }
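The claim-then-push step in the two hunks above is a small lock-free pattern: the block's
link field doubles as an "already deferred" flag (NULL means not queued), the claim CAS
self-loops the link so it stays non-NULL, and the loser of the race simply backs off.
Below is a standalone sketch of the same pattern (not part of this change), using
std::atomic in place of HotSpot's Atomic:: wrappers and hypothetical names.

    #include <atomic>

    struct Node {
      std::atomic<Node*> next{nullptr};   // NULL means "not on the deferred list"
    };

    std::atomic<Node*> deferred_head{nullptr};

    // Push 'n' onto the deferred list at most once.  The claim step CASes the
    // link from NULL to a self-loop, so concurrent callers that lose the race
    // know the node is already queued (or about to be) and do nothing more.
    void defer_update(Node* n) {
      Node* expected = nullptr;
      if (!n->next.compare_exchange_strong(expected, n)) {
        return;  // Someone else already claimed this node.
      }
      // Publish the node with a standard lock-free stack push; keep the link
      // non-NULL (self-loop at end of list), mirroring the code above.
      Node* head = deferred_head.load();
      do {
        n->next.store(head == nullptr ? n : head);
      } while (!deferred_head.compare_exchange_weak(head, n));
    }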
*** 677,689 ****
    // Move empty block to end of list, for possible deletion.
    if (is_empty_bitmask(allocated)) {
      _allocation_list.unlink(*block);
      _allocation_list.push_back(*block);
    }

!   log_trace(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
                                  name(), p2i(block));
    return true;              // Processed one pending update.
  }

  inline void check_release_entry(const oop* entry) {
--- 682,695 ----
    // Move empty block to end of list, for possible deletion.
    if (is_empty_bitmask(allocated)) {
      _allocation_list.unlink(*block);
      _allocation_list.push_back(*block);
+     notify_needs_cleanup();
    }

!   log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
                                  name(), p2i(block));
    return true;              // Processed one pending update.
  }

  inline void check_release_entry(const oop* entry) {
*** 726,749 ****
      block->release_entries(releasing, this);
      Atomic::sub(count, &_allocation_count);
    }
  }

  const size_t initial_active_array_size = 8;

  OopStorage::OopStorage(const char* name,
                         Mutex* allocation_mutex,
                         Mutex* active_mutex) :
!   _name(os::strdup(name)),
    _active_array(ActiveArray::create(initial_active_array_size)),
    _allocation_list(),
    _deferred_updates(NULL),
    _allocation_mutex(allocation_mutex),
    _active_mutex(active_mutex),
    _allocation_count(0),
    _concurrent_iteration_count(0),
!   _needs_cleanup(false)
  {
    _active_array->increment_refcount();
    assert(_active_mutex->rank() < _allocation_mutex->rank(),
           "%s: active_mutex must have lower rank than allocation_mutex", _name);
    assert(Service_lock->rank() < _active_mutex->rank(),
--- 732,766 ----
      block->release_entries(releasing, this);
      Atomic::sub(count, &_allocation_count);
    }
  }

+ const char* dup_name(const char* name) {
+   char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
+   strcpy(dup, name);
+   return dup;
+ }
+ 
+ // Possible values for OopStorage::_needs_cleanup.
+ const uint needs_cleanup_none = 0;     // No cleanup needed.
+ const uint needs_cleanup_marked = 1;   // Requested, but no notification made.
+ const uint needs_cleanup_notified = 2; // Requested and Service thread notified.
+ 
  const size_t initial_active_array_size = 8;

  OopStorage::OopStorage(const char* name,
                         Mutex* allocation_mutex,
                         Mutex* active_mutex) :
!   _name(dup_name(name)),
    _active_array(ActiveArray::create(initial_active_array_size)),
    _allocation_list(),
    _deferred_updates(NULL),
    _allocation_mutex(allocation_mutex),
    _active_mutex(active_mutex),
    _allocation_count(0),
    _concurrent_iteration_count(0),
!   _needs_cleanup(needs_cleanup_none)
  {
    _active_array->increment_refcount();
    assert(_active_mutex->rank() < _allocation_mutex->rank(),
           "%s: active_mutex must have lower rank than allocation_mutex", _name);
    assert(Service_lock->rank() < _active_mutex->rank(),
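The three needs_cleanup_* values added on the right-hand side form a small request
protocol; the functions that drive it appear in the next hunk (record_needs_cleanup,
notify_needs_cleanup, delete_empty_blocks).  A standalone sketch of that kind of
three-state protocol (illustrative only, using std::atomic and a puts() stand-in for
the Service_lock notification):

    #include <atomic>
    #include <cstdio>

    enum CleanupState : unsigned { none = 0, marked = 1, notified = 2 };
    std::atomic<unsigned> needs_cleanup{none};

    // Lock-free request: only bump none -> marked, never downgrade notified.
    void record_cleanup_request() {
      unsigned expected = none;
      needs_cleanup.compare_exchange_strong(expected, marked);
    }

    // Request plus wakeup: move to notified, and only wake the worker on the
    // first transition so repeated calls don't spam notifications.
    void notify_cleanup_request() {
      if (needs_cleanup.exchange(notified) != notified) {
        std::puts("wake service thread");   // stand-in for Service_lock notify
      }
    }

    // Worker side (cf. needs_delete_empty_blocks / delete_empty_blocks in the
    // next hunk): check for a pending request, and clear it before processing.
    bool cleanup_requested() { return needs_cleanup.load() != none; }
    void clear_cleanup_request() { needs_cleanup.store(none); }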
*** 774,869 ****
    for (size_t i = _active_array->block_count(); 0 < i; ) {
      block = _active_array->at(--i);
      Block::delete_block(*block);
    }
    ActiveArray::destroy(_active_array);
!   os::free(const_cast<char*>(_name));
! }
! 
! // Managing service thread notifications.
! //
! // We don't want cleanup work to linger indefinitely, but we also don't want
! // to run the service thread too often.  We're also very limited in what we
! // can do in a release operation, where cleanup work is created.
! //
! // When a release operation changes a block's state to empty, it records the
! // need for cleanup in both the associated storage object and in the global
! // request state.  A safepoint cleanup task notifies the service thread when
! // there may be cleanup work for any storage object, based on the global
! // request state.  But that notification is deferred if the service thread
! // has run recently, and we also avoid duplicate notifications.  The service
! // thread updates the timestamp and resets the state flags on every iteration.
! 
! // Global cleanup request state.
! static volatile bool needs_cleanup_requested = false;
! 
! // Flag for avoiding duplicate notifications.
! static bool needs_cleanup_triggered = false;
! 
! // Time after which a notification can be made.
! static jlong cleanup_trigger_permit_time = 0;
! 
! // Minimum time since last service thread check before notification is
! // permitted.  The value of 500ms was an arbitrary choice; frequent, but not
! // too frequent.
! const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
! 
! void OopStorage::trigger_cleanup_if_needed() {
!   MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
!   if (Atomic::load(&needs_cleanup_requested) &&
!       !needs_cleanup_triggered &&
!       (os::javaTimeNanos() > cleanup_trigger_permit_time)) {
!     needs_cleanup_triggered = true;
!     ml.notify_all();
!   }
  }

! bool OopStorage::has_cleanup_work_and_reset() {
!   assert_lock_strong(Service_lock);
!   cleanup_trigger_permit_time =
!     os::javaTimeNanos() + cleanup_trigger_defer_period;
!   needs_cleanup_triggered = false;
!   // Set the request flag false and return its old value.
!   // Needs to be atomic to avoid dropping a concurrent request.
!   // Can't use Atomic::xchg, which may not support bool.
!   return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
  }

  // Record that cleanup is needed, without notifying the Service thread.
  // Used by release(), where we can't lock even Service_lock.
  void OopStorage::record_needs_cleanup() {
!   // Set local flag first, else service thread could wake up and miss
!   // the request.  This order may instead (rarely) unnecessarily notify.
!   OrderAccess::release_store(&_needs_cleanup, true);
!   OrderAccess::release_store_fence(&needs_cleanup_requested, true);
  }

! bool OopStorage::delete_empty_blocks() {
!   // Service thread might have oopstorage work, but not for this object.
!   // Check for deferred updates even though that's not a service thread
!   // trigger; since we're here, we might as well process them.
!   if (!OrderAccess::load_acquire(&_needs_cleanup) &&
!       (OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
!     return false;
    }

    MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);

    // Clear the request before processing.
!   OrderAccess::release_store_fence(&_needs_cleanup, false);

    // Other threads could be adding to the empty block count or the
    // deferred update list while we're working.  Set an upper bound on
    // how many updates we'll process and blocks we'll try to release,
    // so other threads can't cause an unbounded stay in this function.
!   // We add a bit of slop because the reduce_deferred_updates clause
!   // can cause blocks to be double counted.  If there are few blocks
!   // and many of them are deferred and empty, we might hit the limit
!   // and spin the caller without doing very much work.  Otherwise,
!   // we don't normally hit the limit anyway, instead running out of
!   // work to do.
!   size_t limit = block_count() + 10;

    for (size_t i = 0; i < limit; ++i) {
      // Process deferred updates, which might make empty blocks available.
      // Continue checking once deletion starts, since additional updates
      // might become available while we're working.
--- 791,837 ----
    for (size_t i = _active_array->block_count(); 0 < i; ) {
      block = _active_array->at(--i);
      Block::delete_block(*block);
    }
    ActiveArray::destroy(_active_array);
!   FREE_C_HEAP_ARRAY(char, _name);
  }

! // Called by service thread to check for pending work.
! bool OopStorage::needs_delete_empty_blocks() const {
!   return Atomic::load(&_needs_cleanup) != needs_cleanup_none;
  }

  // Record that cleanup is needed, without notifying the Service thread.
  // Used by release(), where we can't lock even Service_lock.
  void OopStorage::record_needs_cleanup() {
!   Atomic::cmpxchg(needs_cleanup_marked, &_needs_cleanup, needs_cleanup_none);
  }

! // Record that cleanup is needed, and notify the Service thread.
! void OopStorage::notify_needs_cleanup() {
!   // Avoid re-notification if already notified.
!   const uint notified = needs_cleanup_notified;
!   if (Atomic::xchg(notified, &_needs_cleanup) != notified) {
!     MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
!     ml.notify_all();
    }
+ }
+ 
  bool OopStorage::delete_empty_blocks() {
    MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);

    // Clear the request before processing.
!   Atomic::store(needs_cleanup_none, &_needs_cleanup);
!   OrderAccess::fence();

    // Other threads could be adding to the empty block count or the
    // deferred update list while we're working.  Set an upper bound on
    // how many updates we'll process and blocks we'll try to release,
    // so other threads can't cause an unbounded stay in this function.
!   size_t limit = block_count();
!   if (limit == 0) return false; // Empty storage; nothing at all to do.

    for (size_t i = 0; i < limit; ++i) {
      // Process deferred updates, which might make empty blocks available.
      // Continue checking once deletion starts, since additional updates
      // might become available while we're working.
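The comment block on the left-hand (old) side of this hunk describes a rate-limited wakeup
scheme: releases only record a request, a safepoint cleanup task turns pending requests
into at most one service-thread notification, and notifications are suppressed for a
period after the service thread last ran.  Below is a standalone sketch of that scheme
(not part of the patch), using std:: primitives in place of Service_lock,
os::javaTimeNanos(), and HotSpot atomics; only the 500ms period is taken from the code.

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex service_lock;                      // stand-in for Service_lock
    std::condition_variable service_cv;

    std::atomic<bool> cleanup_requested{false};   // set by release operations
    bool cleanup_triggered = false;               // guards duplicate notifications
    std::chrono::steady_clock::time_point trigger_permit_time{};
    const auto defer_period = std::chrono::milliseconds(500);

    // Safepoint cleanup task side: wake the worker only if a request is pending,
    // we haven't already notified, and enough time has passed since the worker
    // last ran.
    void trigger_cleanup_if_needed() {
      std::lock_guard<std::mutex> lock(service_lock);
      if (cleanup_requested.load() && !cleanup_triggered &&
          std::chrono::steady_clock::now() > trigger_permit_time) {
        cleanup_triggered = true;
        service_cv.notify_all();
      }
    }

    // Worker side (called with service_lock held in the real code): push out the
    // next permitted notification time, reset the trigger flag, and atomically
    // consume the request flag, returning its old value.
    bool has_cleanup_work_and_reset() {
      trigger_permit_time = std::chrono::steady_clock::now() + defer_period;
      cleanup_triggered = false;
      return cleanup_requested.exchange(false);
    }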
*** 976,987 ****
  OopStorage::BasicParState::~BasicParState() {
    _storage->relinquish_block_array(_active_array);
    update_concurrent_iteration_count(-1);
    if (_concurrent) {
!     // We may have deferred some cleanup work.
!     const_cast<OopStorage*>(_storage)->record_needs_cleanup();
    }
  }

  void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
    if (_concurrent) {
--- 944,955 ----
  OopStorage::BasicParState::~BasicParState() {
    _storage->relinquish_block_array(_active_array);
    update_concurrent_iteration_count(-1);
    if (_concurrent) {
!     // We may have deferred some work.
!     const_cast<OopStorage*>(_storage)->notify_needs_cleanup();
    }
  }

  void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
    if (_concurrent) {