src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
// Shake up stalled waiters after budget update.
_need_notify_waiters.try_set();
}
- bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) {
+ template<bool FORCE>
+ bool ShenandoahPacer::claim_for_alloc(size_t words) {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));
intptr_t cur = 0;
intptr_t new_val = 0;
do {
cur = Atomic::load(&_budget);
- if (cur < tax && !force) {
+ if (cur < tax && !FORCE) {
// Progress depleted, alas.
return false;
}
new_val = cur - tax;
} while (Atomic::cmpxchg(&_budget, cur, new_val, memory_order_relaxed) != cur);
return true;
}
+ template bool ShenandoahPacer::claim_for_alloc<true>(size_t words);
+ template bool ShenandoahPacer::claim_for_alloc<false>(size_t words);
+
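
For reference, a minimal standalone model of the claim loop above, using std::atomic in place of HotSpot's Atomic wrappers; the budget and tax_rate globals and their initial values are illustrative assumptions, not part of the patch:

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>

std::atomic<intptr_t> budget{1000};  // stands in for _budget
std::atomic<intptr_t> tax_rate{2};   // stands in for _tax_rate

template<bool FORCE>
bool claim_for_alloc(size_t words) {
  // Tax is at least 1 word, scaled by the current tax rate.
  intptr_t tax = std::max<intptr_t>(1, (intptr_t)words * tax_rate.load(std::memory_order_relaxed));
  intptr_t cur = budget.load(std::memory_order_relaxed);
  intptr_t new_val;
  do {
    if (cur < tax && !FORCE) {
      return false;  // budget depleted; caller must wait or force
    }
    new_val = cur - tax;  // FORCE lets the budget go negative
  } while (!budget.compare_exchange_weak(cur, new_val, std::memory_order_relaxed));
  return true;
}

// Explicit instantiations keep the definition in the .cpp file, mirroring the patch.
template bool claim_for_alloc<true>(size_t);
template bool claim_for_alloc<false>(size_t);

Making FORCE a template parameter instead of a bool argument turns the force check into a compile-time constant, so each instantiation carries no runtime test on the hot path.
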
void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
if (Atomic::load(&_epoch) != epoch) {
// Stale ticket, no need to unpace.
void ShenandoahPacer::pace_for_alloc(size_t words) {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
// Fast path: try to allocate right away
- bool claimed = claim_for_alloc(words, false);
+ bool claimed = claim_for_alloc<false>(words);
if (claimed) {
return;
}
- // Forcefully claim the budget: it may go negative at this point, and
- // GC should replenish for this and subsequent allocations. After this claim,
- // we would wait a bit until our claim is matched by additional progress,
- // or the time budget depletes.
- claimed = claim_for_alloc(words, true);
- assert(claimed, "Should always succeed");
-
// Threads that are attaching should not block at all: they are not
// fully initialized yet. Blocking them would be awkward.
// This is probably the path that allocates the thread oop itself.
//
// A thread which is not an active Java thread should also not block.
// This can happen during VM init, when the main thread is not yet an
// active Java thread.
JavaThread* current = JavaThread::current();
if (current->is_attaching_via_jni() ||
!current->is_active_Java_thread()) {
+ claim_for_alloc<true>(words);
return;
}
- double start = os::elapsedTime();
-
- size_t max_ms = ShenandoahPacingMaxDelay;
- size_t total_ms = 0;
-
- while (true) {
+ jlong const max_delay = ShenandoahPacingMaxDelay * NANOSECS_PER_MILLISEC;
+ jlong const start_time = os::elapsed_counter();
+ while (!claimed && (os::elapsed_counter() - start_time) < max_delay) {
// We could instead assist GC, but this would suffice for now.
- size_t cur_ms = (max_ms > total_ms) ? (max_ms - total_ms) : 1;
- wait(cur_ms);
-
- double end = os::elapsedTime();
- total_ms = (size_t)((end - start) * 1000);
-
- if (total_ms > max_ms || Atomic::load(&_budget) >= 0) {
- // Exiting if either:
- // a) Spent local time budget to wait for enough GC progress.
- // Breaking out and allocating anyway, which may mean we outpace GC,
- // and start Degenerated GC cycle.
- // b) The budget had been replenished, which means our claim is satisfied.
- ShenandoahThreadLocalData::add_paced_time(JavaThread::current(), end - start);
- break;
- }
+ wait(1);
+ claimed = claim_for_alloc<false>(words);
+ }
+ if (!claimed) {
+ // Spent local time budget to wait for enough GC progress.
+ // Force allocating anyway, which may mean we outpace GC,
+ // and start Degenerated GC cycle.
+ claimed = claim_for_alloc<true>(words);
+ assert(claimed, "Should always succeed");
}
+ ShenandoahThreadLocalData::add_paced_time(current, (double)(os::elapsed_counter() - start_time) / NANOSECS_PER_SEC);
}
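
The reworked pace_for_alloc amounts to: try a fast-path claim, retry in 1 ms steps until the claim succeeds or ShenandoahPacingMaxDelay elapses, and only then force the claim. A hedged sketch of that shape, building on the claim_for_alloc model above, with std::chrono standing in for os::elapsed_counter(); pace_for_alloc_sketch, pacing_max_delay_ms, and record_paced_time are illustrative stand-ins, not HotSpot names:

#include <chrono>
#include <thread>

// Stand-in for ShenandoahThreadLocalData::add_paced_time.
static void record_paced_time(std::chrono::nanoseconds d) { (void)d; }

void pace_for_alloc_sketch(size_t words, long pacing_max_delay_ms) {
  using clock = std::chrono::steady_clock;
  bool claimed = claim_for_alloc<false>(words);  // fast path
  if (claimed) {
    return;
  }
  auto const start = clock::now();
  auto const deadline = start + std::chrono::milliseconds(pacing_max_delay_ms);
  while (!claimed && clock::now() < deadline) {
    // Stands in for wait(1): give GC a chance to replenish the budget.
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
    claimed = claim_for_alloc<false>(words);
  }
  if (!claimed) {
    // Out of patience: force the claim, possibly driving the budget
    // negative; GC must replenish for this and subsequent allocations.
    claim_for_alloc<true>(words);
  }
  record_paced_time(clock::now() - start);
}
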
void ShenandoahPacer::wait(size_t time_ms) {
// Perform timed wait. It works like sleep(), except without modifying
// the thread interruptible status. MonitorLocker also checks for safepoints.
out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <average non-zero>",
sum / threads_nz * 1000, total * 1000, sum / threads_nz / total * 100);
}
out->cr();
}
+
+ void ShenandoahPeriodicPacerNotifyTask::task() {
+ assert(ShenandoahPacing, "Should not be here otherwise");
+ _pacer->notify_waiters();
+ }
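
ShenandoahPeriodicPacerNotifyTask is a periodic task whose only job is to wake pacing waiters on a timer, so a thread stalled in wait() is re-checked even when no budget update fires a notification. A rough illustration of the same idea using a plain loop; notify_waiters is assumed here to be whatever wakes threads blocked in the pacer's wait():

#include <atomic>
#include <chrono>
#include <thread>

extern void notify_waiters();  // assumed: wakes threads blocked in wait()

void run_periodic_notifier(std::atomic<bool>& stop, int interval_ms) {
  while (!stop.load(std::memory_order_relaxed)) {
    std::this_thread::sleep_for(std::chrono::milliseconds(interval_ms));
    notify_waiters();  // shake up stalled waiters, as the pacer comment puts it
  }
}
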