172 return (size_t) (_heap->max_capacity() * 0.1);
173 } else {
174 // Record history, and replay historical data
175 _progress_history->add(_progress);
176 Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
177 return (size_t) (_progress_history->avg() * HeapWordSize);
178 }
179 }
180
// Reset pacing state for a new GC phase: install a fresh budget derived
// from the non-taxable byte allowance, publish the new tax rate, and bump
// the epoch so outstanding pacing tickets become stale (see unpace_for_alloc).
void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
  // Convert the non-taxable byte allowance to heap words, scaled by tax rate.
  size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
  STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
  // Replace (not add to) the previous budget: leftovers from the old
  // epoch are deliberately discarded.
  Atomic::xchg(&_budget, (intptr_t)initial, memory_order_relaxed);
  Atomic::store(&_tax_rate, tax_rate);
  Atomic::inc(&_epoch);

  // Shake up stalled waiters after budget update.
  _need_notify_waiters.try_set();
}
191
// Try to claim the pacing tax for an allocation of "words" words.
// Returns false when the budget is depleted and force == false.
// With force == true, always succeeds: the budget may go negative,
// to be replenished by subsequent GC progress (see pace_for_alloc).
bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // Tax at least one word, even for zero-word requests.
  intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));

  intptr_t cur = 0;
  intptr_t new_val = 0;
  // CAS loop: retry until our decrement of the shared budget is published.
  do {
    cur = Atomic::load(&_budget);
    if (cur < tax && !force) {
      // Progress depleted, alas.
      return false;
    }
    new_val = cur - tax;
  } while (Atomic::cmpxchg(&_budget, cur, new_val, memory_order_relaxed) != cur);
  return true;
}
209
// Refund a previously claimed allocation tax, unless the pacing epoch
// has moved on (restart_with already reset the budget, so the refund
// would be meaningless).
void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  if (Atomic::load(&_epoch) != epoch) {
    // Stale ticket, no need to unpace.
    return;
  }

  // Mirror the claim-side computation: tax is at least one word.
  size_t tax = MAX2<size_t>(1, words * Atomic::load(&_tax_rate));
  add_budget(tax);
}
221
222 intptr_t ShenandoahPacer::epoch() {
223 return Atomic::load(&_epoch);
224 }
225
// Pace an allocation of "words" words: claim the tax if possible,
// otherwise force the claim and stall the caller until GC progress
// replenishes the budget or the local delay allowance runs out.
void ShenandoahPacer::pace_for_alloc(size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // Fast path: try to allocate right away
  bool claimed = claim_for_alloc(words, false);
  if (claimed) {
    return;
  }

  // Forcefully claim the budget: it may go negative at this point, and
  // GC should replenish for this and subsequent allocations. After this claim,
  // we would wait a bit until our claim is matched by additional progress,
  // or the time budget depletes.
  claimed = claim_for_alloc(words, true);
  assert(claimed, "Should always succeed");

  // Threads that are attaching should not block at all: they are not
  // fully initialized yet. Blocking them would be awkward.
  // This is probably the path that allocates the thread oop itself.
  //
  // Thread which is not an active Java thread should also not block.
  // This can happen during VM init when main thread is still not an
  // active Java thread.
  JavaThread* current = JavaThread::current();
  if (current->is_attaching_via_jni() ||
      !current->is_active_Java_thread()) {
    return;
  }

  double start = os::elapsedTime();

  size_t max_ms = ShenandoahPacingMaxDelay;
  size_t total_ms = 0;

  while (true) {
    // We could instead assist GC, but this would suffice for now.
    // Wait out the remainder of the delay allowance, at least 1 ms so
    // the monitor wait does not stall indefinitely (see wait()).
    size_t cur_ms = (max_ms > total_ms) ? (max_ms - total_ms) : 1;
    wait(cur_ms);

    double end = os::elapsedTime();
    total_ms = (size_t)((end - start) * 1000);

    if (total_ms > max_ms || Atomic::load(&_budget) >= 0) {
      // Exiting if either:
      // a) Spent local time budget to wait for enough GC progress.
      //    Breaking out and allocating anyway, which may mean we outpace GC,
      //    and start Degenerated GC cycle.
      // b) The budget had been replenished, which means our claim is satisfied.
      ShenandoahThreadLocalData::add_paced_time(JavaThread::current(), end - start);
      break;
    }
  }
}
279
void ShenandoahPacer::wait(size_t time_ms) {
  // Perform timed wait. It works like sleep(), except without modifying
  // the thread interruptible status. MonitorLocker also checks for safepoints.
  assert(time_ms > 0, "Should not call this with zero argument, as it would stall until notify");
  assert(time_ms <= LONG_MAX, "Sanity");
  MonitorLocker locker(_wait_monitor);
  _wait_monitor->wait((long)time_ms);
}
288
289 void ShenandoahPacer::notify_waiters() {
290 if (_need_notify_waiters.try_unset()) {
291 MonitorLocker locker(_wait_monitor);
292 _wait_monitor->notify_all();
293 }
294 }
295
296 void ShenandoahPacer::flush_stats_to_cycle() {
297 double sum = 0;
321 sum += d;
322 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): %s",
323 d * 1000, total * 1000, d/total*100, t->name());
324 }
325 threads_total++;
326 ShenandoahThreadLocalData::reset_paced_time(t);
327 }
328 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <total>",
329 sum * 1000, total * 1000, sum/total*100);
330
331 if (threads_total > 0) {
332 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <average total>",
333 sum / threads_total * 1000, total * 1000, sum / threads_total / total * 100);
334 }
335 if (threads_nz > 0) {
336 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <average non-zero>",
337 sum / threads_nz * 1000, total * 1000, sum / threads_nz / total * 100);
338 }
339 out->cr();
340 }
|
172 return (size_t) (_heap->max_capacity() * 0.1);
173 } else {
174 // Record history, and replay historical data
175 _progress_history->add(_progress);
176 Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
177 return (size_t) (_progress_history->avg() * HeapWordSize);
178 }
179 }
180
// Reset pacing state for a new GC phase: install a fresh budget derived
// from the non-taxable byte allowance, publish the new tax rate, and bump
// the epoch so outstanding pacing tickets become stale (see unpace_for_alloc).
void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
  // Convert the non-taxable byte allowance to heap words, scaled by tax rate.
  size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
  STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
  // Replace (not add to) the previous budget: leftovers from the old
  // epoch are deliberately discarded.
  Atomic::xchg(&_budget, (intptr_t)initial, memory_order_relaxed);
  Atomic::store(&_tax_rate, tax_rate);
  Atomic::inc(&_epoch);

  // Shake up stalled waiters after budget update.
  _need_notify_waiters.try_set();
}
191
// Try to claim the pacing tax for an allocation of "words" words.
// Returns false when the budget is depleted and FORCE is false.
// With FORCE, always succeeds: the budget may go negative, to be
// replenished by subsequent GC progress (see pace_for_alloc).
template<bool FORCE>
bool ShenandoahPacer::claim_for_alloc(size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // Tax at least one word, even for zero-word requests.
  intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));

  intptr_t cur = 0;
  intptr_t new_val = 0;
  // CAS loop: retry until our decrement of the shared budget is published.
  do {
    cur = Atomic::load(&_budget);
    if (cur < tax && !FORCE) {
      // Progress depleted, alas.
      return false;
    }
    new_val = cur - tax;
  } while (Atomic::cmpxchg(&_budget, cur, new_val, memory_order_relaxed) != cur);
  return true;
}

// Explicit instantiations for the two variants used by pace_for_alloc.
template bool ShenandoahPacer::claim_for_alloc<true>(size_t words);
template bool ShenandoahPacer::claim_for_alloc<false>(size_t words);
213
// Refund a previously claimed allocation tax, unless the pacing epoch
// has moved on (restart_with already reset the budget, so the refund
// would be meaningless).
void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  if (Atomic::load(&_epoch) != epoch) {
    // Stale ticket, no need to unpace.
    return;
  }

  // Mirror the claim-side computation: tax is at least one word.
  size_t tax = MAX2<size_t>(1, words * Atomic::load(&_tax_rate));
  add_budget(tax);
}
225
226 intptr_t ShenandoahPacer::epoch() {
227 return Atomic::load(&_epoch);
228 }
229
// Pace an allocation of "words" words: claim the tax if possible,
// otherwise stall the caller in short waits, retrying the claim, until
// GC progress replenishes the budget or the delay allowance runs out;
// then force the claim and proceed.
void ShenandoahPacer::pace_for_alloc(size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // Fast path: try to allocate right away
  bool claimed = claim_for_alloc<false>(words);
  if (claimed) {
    return;
  }

  // Threads that are attaching should not block at all: they are not
  // fully initialized yet. Blocking them would be awkward.
  // This is probably the path that allocates the thread oop itself.
  //
  // Thread which is not an active Java thread should also not block.
  // This can happen during VM init when main thread is still not an
  // active Java thread.
  JavaThread* current = JavaThread::current();
  if (current->is_attaching_via_jni() ||
      !current->is_active_Java_thread()) {
    // Forcefully take the tax: budget may go negative, GC replenishes it.
    claim_for_alloc<true>(words);
    return;
  }

  // Wait in 1 ms slices, retrying the claim after each, until the claim
  // succeeds or the local delay allowance is exhausted.
  jlong const max_delay = ShenandoahPacingMaxDelay * NANOSECS_PER_MILLISEC;
  jlong const start_time = os::elapsed_counter();
  while (!claimed && (os::elapsed_counter() - start_time) < max_delay) {
    // We could instead assist GC, but this would suffice for now.
    wait(1);
    claimed = claim_for_alloc<false>(words);
  }
  if (!claimed) {
    // Spent local time budget to wait for enough GC progress.
    // Force allocating anyway, which may mean we outpace GC,
    // and start Degenerated GC cycle.
    claimed = claim_for_alloc<true>(words);
    assert(claimed, "Should always succeed");
  }
  // Record how long this thread was paced, for the cycle statistics.
  ShenandoahThreadLocalData::add_paced_time(current, (double)(os::elapsed_counter() - start_time) / NANOSECS_PER_SEC);
}
269
void ShenandoahPacer::wait(size_t time_ms) {
  // Perform timed wait. It works like sleep(), except without modifying
  // the thread interruptible status. MonitorLocker also checks for safepoints.
  assert(time_ms > 0, "Should not call this with zero argument, as it would stall until notify");
  assert(time_ms <= LONG_MAX, "Sanity");
  MonitorLocker locker(_wait_monitor);
  _wait_monitor->wait((long)time_ms);
}
278
279 void ShenandoahPacer::notify_waiters() {
280 if (_need_notify_waiters.try_unset()) {
281 MonitorLocker locker(_wait_monitor);
282 _wait_monitor->notify_all();
283 }
284 }
285
286 void ShenandoahPacer::flush_stats_to_cycle() {
287 double sum = 0;
311 sum += d;
312 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): %s",
313 d * 1000, total * 1000, d/total*100, t->name());
314 }
315 threads_total++;
316 ShenandoahThreadLocalData::reset_paced_time(t);
317 }
318 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <total>",
319 sum * 1000, total * 1000, sum/total*100);
320
321 if (threads_total > 0) {
322 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <average total>",
323 sum / threads_total * 1000, total * 1000, sum / threads_total / total * 100);
324 }
325 if (threads_nz > 0) {
326 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <average non-zero>",
327 sum / threads_nz * 1000, total * 1000, sum / threads_nz / total * 100);
328 }
329 out->cr();
330 }
331
// Periodic task body: drains the pacer's notify flag (armed, e.g., by
// restart_with) and wakes any stalled allocating threads.
void ShenandoahPeriodicPacerNotifyTask::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  _pacer->notify_waiters();
}
|