1 /* 2 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 27 #include "gc/shenandoah/shenandoahFreeSet.hpp" 28 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 29 #include "gc/shenandoah/shenandoahPacer.hpp" 30 #include "gc/shenandoah/shenandoahPhaseTimings.hpp" 31 #include "runtime/atomic.hpp" 32 #include "runtime/javaThread.inline.hpp" 33 #include "runtime/mutexLocker.hpp" 34 #include "runtime/threadSMR.hpp" 35 36 /* 37 * In normal concurrent cycle, we have to pace the application to let GC finish. 38 * 39 * Here, we do not know how large would be the collection set, and what are the 40 * relative performances of the each stage in the concurrent cycle, and so we have to 41 * make some assumptions. 42 * 43 * For concurrent mark, there is no clear notion of progress. The moderately accurate 44 * and easy to get metric is the amount of live objects the mark had encountered. But, 45 * that does directly correlate with the used heap, because the heap might be fully 46 * dead or fully alive. We cannot assume either of the extremes: we would either allow 47 * application to run out of memory if we assume heap is fully dead but it is not, and, 48 * conversely, we would pacify application excessively if we assume heap is fully alive 49 * but it is not. So we need to guesstimate the particular expected value for heap liveness. 50 * The best way to do this is apparently recording the past history. 51 * 52 * For concurrent evac and update-refs, we are walking the heap per-region, and so the 53 * notion of progress is clear: we get reported the "used" size from the processed regions 54 * and use the global heap-used as the baseline. 55 * 56 * The allocatable space when GC is running is "free" at the start of phase, but the 57 * accounted budget is based on "used". So, we need to adjust the tax knowing that. 58 */ 59 60 void ShenandoahPacer::setup_for_mark() { 61 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 62 63 size_t live = update_and_get_progress_history(); 64 size_t free = _heap->free_set()->available(); 65 66 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 67 size_t taxable = free - non_taxable; 68 69 double tax = 1.0 * live / taxable; // base tax for available free space 70 tax *= 1; // mark can succeed with immediate garbage, claim all available space 71 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 72 73 restart_with(non_taxable, tax); 74 75 log_info(gc, ergo)("Pacer for Mark. Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 76 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 77 byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), 78 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 79 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 80 tax); 81 } 82 83 void ShenandoahPacer::setup_for_evac() { 84 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 85 86 size_t used = _heap->collection_set()->used(); 87 size_t free = _heap->free_set()->available(); 88 89 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 90 size_t taxable = free - non_taxable; 91 92 double tax = 1.0 * used / taxable; // base tax for available free space 93 tax *= 2; // evac is followed by update-refs, claim 1/2 of remaining free 94 tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase 95 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 96 97 restart_with(non_taxable, tax); 98 99 log_info(gc, ergo)("Pacer for Evacuation. Used CSet: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 100 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 101 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), 102 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 103 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 104 tax); 105 } 106 107 void ShenandoahPacer::setup_for_updaterefs() { 108 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 109 110 size_t used = _heap->used(); 111 size_t free = _heap->free_set()->available(); 112 113 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 114 size_t taxable = free - non_taxable; 115 116 double tax = 1.0 * used / taxable; // base tax for available free space 117 tax *= 1; // update-refs is the last phase, claim the remaining free 118 tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase 119 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 120 121 restart_with(non_taxable, tax); 122 123 log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 124 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 125 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), 126 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 127 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 128 tax); 129 } 130 131 /* 132 * In idle phase, we have to pace the application to let control thread react with GC start. 133 * 134 * Here, we have rendezvous with concurrent thread that adds up the budget as it acknowledges 135 * it had seen recent allocations. It will naturally pace the allocations if control thread is 136 * not catching up. To bootstrap this feedback cycle, we need to start with some initial budget 137 * for applications to allocate at. 138 */ 139 140 void ShenandoahPacer::setup_for_idle() { 141 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 142 143 size_t initial = _heap->max_capacity() / 100 * ShenandoahPacingIdleSlack; 144 double tax = 1; 145 146 restart_with(initial, tax); 147 148 log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 149 byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial), 150 tax); 151 } 152 153 /* 154 * There is no useful notion of progress for these operations. To avoid stalling 155 * the allocators unnecessarily, allow them to run unimpeded. 156 */ 157 158 void ShenandoahPacer::setup_for_reset() { 159 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 160 161 size_t initial = _heap->max_capacity(); 162 restart_with(initial, 1.0); 163 164 log_info(gc, ergo)("Pacer for Reset. Non-Taxable: " SIZE_FORMAT "%s", 165 byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial)); 166 } 167 168 size_t ShenandoahPacer::update_and_get_progress_history() { 169 if (_progress == -1) { 170 // First initialization, report some prior 171 Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO); 172 return (size_t) (_heap->max_capacity() * 0.1); 173 } else { 174 // Record history, and reply historical data 175 _progress_history->add(_progress); 176 Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO); 177 return (size_t) (_progress_history->avg() * HeapWordSize); 178 } 179 } 180 181 void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) { 182 size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize; 183 STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); 184 Atomic::xchg(&_budget, (intptr_t)initial, memory_order_relaxed); 185 Atomic::store(&_tax_rate, tax_rate); 186 Atomic::inc(&_epoch); 187 188 // Shake up stalled waiters after budget update. 189 _need_notify_waiters.try_set(); 190 } 191 192 template<bool FORCE> 193 bool ShenandoahPacer::claim_for_alloc(size_t words) { 194 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 195 196 intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate)); 197 198 intptr_t cur = 0; 199 intptr_t new_val = 0; 200 do { 201 cur = Atomic::load(&_budget); 202 if (cur < tax && !FORCE) { 203 // Progress depleted, alas. 204 return false; 205 } 206 new_val = cur - tax; 207 } while (Atomic::cmpxchg(&_budget, cur, new_val, memory_order_relaxed) != cur); 208 return true; 209 } 210 211 template bool ShenandoahPacer::claim_for_alloc<true>(size_t words); 212 template bool ShenandoahPacer::claim_for_alloc<false>(size_t words); 213 214 void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) { 215 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 216 217 if (Atomic::load(&_epoch) != epoch) { 218 // Stale ticket, no need to unpace. 219 return; 220 } 221 222 size_t tax = MAX2<size_t>(1, words * Atomic::load(&_tax_rate)); 223 add_budget(tax); 224 } 225 226 intptr_t ShenandoahPacer::epoch() { 227 return Atomic::load(&_epoch); 228 } 229 230 void ShenandoahPacer::pace_for_alloc(size_t words) { 231 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 232 233 // Fast path: try to allocate right away 234 bool claimed = claim_for_alloc<false>(words); 235 if (claimed) { 236 return; 237 } 238 239 // Threads that are attaching should not block at all: they are not 240 // fully initialized yet. Blocking them would be awkward. 241 // This is probably the path that allocates the thread oop itself. 242 // 243 // Thread which is not an active Java thread should also not block. 244 // This can happen during VM init when main thread is still not an 245 // active Java thread. 246 JavaThread* current = JavaThread::current(); 247 if (current->is_attaching_via_jni() || 248 !current->is_active_Java_thread()) { 249 claim_for_alloc<true>(words); 250 return; 251 } 252 253 jlong const max_delay = ShenandoahPacingMaxDelay * NANOSECS_PER_MILLISEC; 254 jlong const start_time = os::elapsed_counter(); 255 while (!claimed && (os::elapsed_counter() - start_time) < max_delay) { 256 // We could instead assist GC, but this would suffice for now. 257 wait(1); 258 claimed = claim_for_alloc<false>(words); 259 } 260 if (!claimed) { 261 // Spent local time budget to wait for enough GC progress. 262 // Force allocating anyway, which may mean we outpace GC, 263 // and start Degenerated GC cycle. 264 claimed = claim_for_alloc<true>(words); 265 assert(claimed, "Should always succeed"); 266 } 267 ShenandoahThreadLocalData::add_paced_time(current, (double)(os::elapsed_counter() - start_time) / NANOSECS_PER_SEC); 268 } 269 270 void ShenandoahPacer::wait(size_t time_ms) { 271 // Perform timed wait. It works like like sleep(), except without modifying 272 // the thread interruptible status. MonitorLocker also checks for safepoints. 273 assert(time_ms > 0, "Should not call this with zero argument, as it would stall until notify"); 274 assert(time_ms <= LONG_MAX, "Sanity"); 275 MonitorLocker locker(_wait_monitor); 276 _wait_monitor->wait((long)time_ms); 277 } 278 279 void ShenandoahPacer::notify_waiters() { 280 if (_need_notify_waiters.try_unset()) { 281 MonitorLocker locker(_wait_monitor); 282 _wait_monitor->notify_all(); 283 } 284 } 285 286 void ShenandoahPacer::flush_stats_to_cycle() { 287 double sum = 0; 288 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { 289 sum += ShenandoahThreadLocalData::paced_time(t); 290 } 291 ShenandoahHeap::heap()->phase_timings()->record_phase_time(ShenandoahPhaseTimings::pacing, sum); 292 } 293 294 void ShenandoahPacer::print_cycle_on(outputStream* out) { 295 MutexLocker lock(Threads_lock); 296 297 double now = os::elapsedTime(); 298 double total = now - _last_time; 299 _last_time = now; 300 301 out->cr(); 302 out->print_cr("Allocation pacing accrued:"); 303 304 size_t threads_total = 0; 305 size_t threads_nz = 0; 306 double sum = 0; 307 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { 308 double d = ShenandoahThreadLocalData::paced_time(t); 309 if (d > 0) { 310 threads_nz++; 311 sum += d; 312 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): %s", 313 d * 1000, total * 1000, d/total*100, t->name()); 314 } 315 threads_total++; 316 ShenandoahThreadLocalData::reset_paced_time(t); 317 } 318 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <total>", 319 sum * 1000, total * 1000, sum/total*100); 320 321 if (threads_total > 0) { 322 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <average total>", 323 sum / threads_total * 1000, total * 1000, sum / threads_total / total * 100); 324 } 325 if (threads_nz > 0) { 326 out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <average non-zero>", 327 sum / threads_nz * 1000, total * 1000, sum / threads_nz / total * 100); 328 } 329 out->cr(); 330 } 331 332 void ShenandoahPeriodicPacerNotifyTask::task() { 333 assert(ShenandoahPacing, "Should not be here otherwise"); 334 _pacer->notify_waiters(); 335 }