// GC is starting, bump the internal ID
update_gc_id();

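// GCIdMark is an RAII mark: log messages emitted while it is in scope carry
// the GC id tag, so the phases of this cycle log under a single id.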
GCIdMark gc_id_mark;

_heap->reset_bytes_allocated_since_gc_start();

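// Snapshot the combined metaspace statistics before the cycle; presumably the
// post-cycle numbers are compared against this to report the metaspace change.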
MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

// If a GC was requested, sample the counters even without actual triggers
// from the allocation machinery. This captures the GC phases more accurately.
_heap->set_forced_counters_update(true);
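// Assumption: the matching set_forced_counters_update(false) is issued once
// the cycle winds down, restoring normal counter sampling.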

// If a GC was requested, dump the free set data for performance debugging.
_heap->free_set()->log_status_under_lock();
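// As the name suggests, log_status_under_lock is assumed to take the heap
// lock itself, so it logs a consistent free set snapshot.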

{
  // Cannot uncommit bitmap slices during concurrent reset
  ShenandoahNoUncommitMark forbid_region_uncommit(_heap);

  _heap->print_before_gc();
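  // Dispatch on the mode selected for this cycle: a fully concurrent cycle,
  // a stop-the-world degenerated or full collection, or (in generational
  // mode) a cycle servicing the old generation.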
  switch (gc_mode()) {
    case concurrent_normal: {
      service_concurrent_normal_cycle(request);
      break;
    }
    case stw_degenerated: {
      service_stw_degenerated_cycle(request);
      break;
    }
    case stw_full: {
      service_stw_full_cycle(request.cause);
      break;
    }
    case servicing_old: {
      assert(request.generation->is_old(), "Expected old generation here");
      service_concurrent_old_cycle(request);
      break;
    }
    default:
      ShouldNotReachHere();
  }
  _heap->print_after_gc();
}
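// Leaving the scope releases ShenandoahNoUncommitMark, so region uncommit is
// permitted again.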

// If this cycle completed successfully, notify threads waiting for GC and
// wake mutators that stalled on allocation failure.
if (!_heap->cancelled_gc()) {
  notify_gc_waiters();
  notify_alloc_failure_waiters();
}

// Report the current free set state at the end of the cycle, whether it is
// a normal completion or an abort.
_heap->free_set()->log_status_under_lock();

// Notify the Universe about new heap usage. This has implications for the
// global soft refs policy, so we should report it every time heap usage
// goes down.
_heap->update_capacity_and_used_at_gc();

// Signal that we have completed a visit to all live objects.
_heap->record_whole_heap_examined_timestamp();