
src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp


133       ShenandoahNoUncommitMark forbid_region_uncommit(heap);
134 
135       // GC is starting, bump the internal ID
136       update_gc_id();
137 
138       GCIdMark gc_id_mark;
139 
140       heuristics->cancel_trigger_request();
141 
142       heap->reset_bytes_allocated_since_gc_start();
143 
144       MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
145 
146       // If GC was requested, we are sampling the counters even without actual triggers
147       // from allocation machinery. This captures GC phases more accurately.
148       heap->set_forced_counters_update(true);
149 
150       // If GC was requested, we better dump freeset data for performance debugging
151       heap->free_set()->log_status_under_lock();
152 

153       switch (mode) {
154         case concurrent_normal:
155           service_concurrent_normal_cycle(cause);
156           break;
157         case stw_degenerated:
158           service_stw_degenerated_cycle(cause, degen_point);
159           break;
160         case stw_full:
161           service_stw_full_cycle(cause);
162           break;
163         default:
164           ShouldNotReachHere();
165       }

166 
167       // If this was the requested GC cycle, notify waiters about it
168       if (is_gc_requested) {
169         notify_gc_waiters();
170       }
171 
172       // If this cycle completed without being cancelled, notify waiters about it
173       if (!heap->cancelled_gc()) {
174         notify_alloc_failure_waiters();
175       }
176 
177       // Report current free set state at the end of cycle, whether
178       // it is a normal completion, or the abort.
179       heap->free_set()->log_status_under_lock();
180 
181       {
182         // Notify Universe about new heap usage. This has implications for
183         // global soft refs policy, and we better report it every time heap
184         // usage goes down.
185         ShenandoahHeapLocker locker(heap->lock());

133       ShenandoahNoUncommitMark forbid_region_uncommit(heap);
134 
135       // GC is starting, bump the internal ID
136       update_gc_id();
137 
138       GCIdMark gc_id_mark;
139 
140       heuristics->cancel_trigger_request();
141 
142       heap->reset_bytes_allocated_since_gc_start();
143 
144       MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
145 
146       // If GC was requested, we are sampling the counters even without actual triggers
147       // from allocation machinery. This captures GC phases more accurately.
148       heap->set_forced_counters_update(true);
149 
150       // If GC was requested, we better dump freeset data for performance debugging
151       heap->free_set()->log_status_under_lock();
152 
153       heap->print_before_gc();
154       switch (mode) {
155         case concurrent_normal:
156           service_concurrent_normal_cycle(cause);
157           break;
158         case stw_degenerated:
159           service_stw_degenerated_cycle(cause, degen_point);
160           break;
161         case stw_full:
162           service_stw_full_cycle(cause);
163           break;
164         default:
165           ShouldNotReachHere();
166       }
167       heap->print_after_gc();
168 
169       // If this was the requested GC cycle, notify waiters about it
170       if (is_gc_requested) {
171         notify_gc_waiters();
172       }
173 
174       // If this cycle completed without being cancelled, notify waiters about it
175       if (!heap->cancelled_gc()) {
176         notify_alloc_failure_waiters();
177       }
178 
179       // Report current free set state at the end of cycle, whether
180       // it is a normal completion, or the abort.
181       heap->free_set()->log_status_under_lock();
182 
183       {
184         // Notify Universe about new heap usage. This has implications for
185         // global soft refs policy, and we better report it every time heap
186         // usage goes down.
187         ShenandoahHeapLocker locker(heap->lock());