/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zBreakpoint.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zDriver.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zMessagePort.inline.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zVerify.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"

static const ZStatPhaseCycle      ZPhaseCycle("Garbage Collection Cycle");
static const ZStatPhasePause      ZPhasePauseMarkStart("Pause Mark Start");
static const ZStatPhaseConcurrent ZPhaseConcurrentMark("Concurrent Mark");
static const ZStatPhaseConcurrent ZPhaseConcurrentMarkContinue("Concurrent Mark Continue");
static const ZStatPhaseConcurrent ZPhaseConcurrentMarkFree("Concurrent Mark Free");
static const ZStatPhasePause      ZPhasePauseMarkEnd("Pause Mark End");
static const ZStatPhaseConcurrent ZPhaseConcurrentProcessNonStrongReferences("Concurrent Process Non-Strong References");
static const ZStatPhaseConcurrent ZPhaseConcurrentResetRelocationSet("Concurrent Reset Relocation Set");
static const ZStatPhaseConcurrent ZPhaseConcurrentSelectRelocationSet("Concurrent Select Relocation Set");
static const ZStatPhasePause      ZPhasePauseRelocateStart("Pause Relocate Start");
static const ZStatPhaseConcurrent ZPhaseConcurrentRelocated("Concurrent Relocate");
static const ZStatCriticalPhase   ZCriticalPhaseGCLockerStall("GC Locker Stall", false /* verbose */);
static const ZStatSampler         ZSamplerJavaThreads("System", "Java Threads", ZStatUnitThreads);

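// A ZDriverRequest describes a single GC request: the GCCause that triggered
// it and the number of worker threads to use for the cycle. Two requests
// compare equal if they have the same cause.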
ZDriverRequest::ZDriverRequest() :
    ZDriverRequest(GCCause::_no_gc) {}

ZDriverRequest::ZDriverRequest(GCCause::Cause cause) :
    ZDriverRequest(cause, ConcGCThreads) {}

ZDriverRequest::ZDriverRequest(GCCause::Cause cause, uint nworkers) :
    _cause(cause),
    _nworkers(nworkers) {}

bool ZDriverRequest::operator==(const ZDriverRequest& other) const {
  return _cause == other._cause;
}

GCCause::Cause ZDriverRequest::cause() const {
  return _cause;
}

uint ZDriverRequest::nworkers() const {
  return _nworkers;
}

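// Common base class for ZGC's safepoint operations. It takes the Heap_lock,
// handles GC locker interaction, sets up the GC id and active-GC marker, and
// runs pre-operation verification. Subclasses implement do_operation().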
class VM_ZOperation : public VM_Operation {
private:
  const uint _gc_id;
  bool       _gc_locked;
  bool       _success;

public:
  VM_ZOperation() :
      _gc_id(GCId::current()),
      _gc_locked(false),
      _success(false) {}

  virtual bool needs_inactive_gc_locker() const {
    // An inactive GC locker is needed in operations where we change the bad
    // mask or move objects. Changing the bad mask will invalidate all oops,
    // which makes it conceptually the same thing as moving all objects.
    return false;
  }

  virtual bool skip_thread_oop_barriers() const {
    return true;
  }

  virtual bool do_operation() = 0;

  virtual bool doit_prologue() {
    Heap_lock->lock();
    return true;
  }

  virtual void doit() {
    // Abort if GC locker state is incompatible
    if (needs_inactive_gc_locker() && GCLocker::check_active_before_gc()) {
      _gc_locked = true;
      return;
    }

    // Setup GC id and active marker
    GCIdMark gc_id_mark(_gc_id);
    IsGCActiveMark gc_active_mark;

    // Verify before operation
    ZVerify::before_zoperation();

    // Execute operation
    _success = do_operation();

    // Update statistics
    ZStatSample(ZSamplerJavaThreads, Threads::number_of_threads());
  }

  virtual void doit_epilogue() {
    Heap_lock->unlock();
  }

  bool gc_locked() const {
    return _gc_locked;
  }

  bool success() const {
    return _success;
  }
};

class VM_ZMarkStart : public VM_ZOperation {
public:
  virtual VMOp_Type type() const {
    return VMOp_ZMarkStart;
  }

  virtual bool needs_inactive_gc_locker() const {
    return true;
  }

  virtual bool do_operation() {
    ZStatTimer timer(ZPhasePauseMarkStart);
    ZServiceabilityPauseTracer tracer;

    ZCollectedHeap::heap()->increment_total_collections(true /* full */);

    ZHeap::heap()->mark_start();
    return true;
  }
};

class VM_ZMarkEnd : public VM_ZOperation {
public:
  virtual VMOp_Type type() const {
    return VMOp_ZMarkEnd;
  }

  virtual bool do_operation() {
    ZStatTimer timer(ZPhasePauseMarkEnd);
    ZServiceabilityPauseTracer tracer;
    return ZHeap::heap()->mark_end();
  }
};

class VM_ZRelocateStart : public VM_ZOperation {
public:
  virtual VMOp_Type type() const {
    return VMOp_ZRelocateStart;
  }

  virtual bool needs_inactive_gc_locker() const {
    return true;
  }

  virtual bool do_operation() {
    ZStatTimer timer(ZPhasePauseRelocateStart);
    ZServiceabilityPauseTracer tracer;
    ZHeap::heap()->relocate_start();
    return true;
  }
};

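// Limited verification run at a safepoint after weak reference processing,
// used when ZVerifyRoots or ZVerifyObjects is enabled but full heap
// verification was not requested.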
class VM_ZVerify : public VM_Operation {
public:
  virtual VMOp_Type type() const {
    return VMOp_ZVerify;
  }

  virtual bool skip_thread_oop_barriers() const {
    return true;
  }

  virtual void doit() {
    ZVerify::after_weak_processing();
  }
};

ZDriver::ZDriver() :
    _gc_cycle_port(),
    _gc_locker_port() {
  set_name("ZDriver");
  create_and_start();
}

bool ZDriver::is_busy() const {
  return _gc_cycle_port.is_busy();
}

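// Entry point for GC requests. Causes in the first group start a synchronous
// GC, where the caller waits for the cycle to complete. Causes in the second
// group start an asynchronous GC. _gc_locker is only used to restart a VM
// operation that was previously blocked by the GC locker.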
void ZDriver::collect(const ZDriverRequest& request) {
  switch (request.cause()) {
  case GCCause::_wb_young_gc:
  case GCCause::_wb_conc_mark:
  case GCCause::_wb_full_gc:
  case GCCause::_dcmd_gc_run:
  case GCCause::_java_lang_system_gc:
  case GCCause::_full_gc_alot:
  case GCCause::_scavenge_alot:
  case GCCause::_jvmti_force_gc:
  case GCCause::_metadata_GC_clear_soft_refs:
  case GCCause::_codecache_GC_threshold:
    // Start synchronous GC
    _gc_cycle_port.send_sync(request);
    break;

  case GCCause::_z_timer:
  case GCCause::_z_warmup:
  case GCCause::_z_allocation_rate:
  case GCCause::_z_allocation_stall:
  case GCCause::_z_proactive:
  case GCCause::_z_high_usage:
  case GCCause::_metadata_GC_threshold:
    // Start asynchronous GC
    _gc_cycle_port.send_async(request);
    break;

  case GCCause::_gc_locker:
    // Restart VM operation previously blocked by the GC locker
    _gc_locker_port.signal();
    break;

  case GCCause::_wb_breakpoint:
    ZBreakpoint::start_gc();
    _gc_cycle_port.send_async(request);
    break;

  default:
    // Other causes not supported
    fatal("Unsupported GC cause (%s)", GCCause::to_string(request.cause()));
    break;
  }
}

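// Execute a VM operation of type T. If the operation was blocked by the GC
// locker, wait for the GC locker port to be signalled and retry the operation.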
template <typename T>
bool ZDriver::pause() {
  for (;;) {
    T op;
    VMThread::execute(&op);
    if (op.gc_locked()) {
      // Wait for GC to become unlocked and restart the VM operation
      ZStatTimer timer(ZCriticalPhaseGCLockerStall);
      _gc_locker_port.wait();
      continue;
    }

    // Notify VM operation completed
    _gc_locker_port.ack();

    return op.success();
  }
}

void ZDriver::pause_mark_start() {
  pause<VM_ZMarkStart>();
}

void ZDriver::concurrent_mark() {
  ZStatTimer timer(ZPhaseConcurrentMark);
  ZBreakpoint::at_after_marking_started();
  ZHeap::heap()->mark(true /* initial */);
  ZBreakpoint::at_before_marking_completed();
}

bool ZDriver::pause_mark_end() {
  return pause<VM_ZMarkEnd>();
}

void ZDriver::concurrent_mark_continue() {
  ZStatTimer timer(ZPhaseConcurrentMarkContinue);
  ZHeap::heap()->mark(false /* initial */);
}

void ZDriver::concurrent_mark_free() {
  ZStatTimer timer(ZPhaseConcurrentMarkFree);
  ZHeap::heap()->mark_free();
}

void ZDriver::concurrent_process_non_strong_references() {
  ZStatTimer timer(ZPhaseConcurrentProcessNonStrongReferences);
  ZBreakpoint::at_after_reference_processing_started();
  ZHeap::heap()->process_non_strong_references();
}

void ZDriver::concurrent_reset_relocation_set() {
  ZStatTimer timer(ZPhaseConcurrentResetRelocationSet);
  ZHeap::heap()->reset_relocation_set();
}

void ZDriver::pause_verify() {
  if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
    // Full verification
    VM_Verify op;
    VMThread::execute(&op);
  } else if (ZVerifyRoots || ZVerifyObjects) {
    // Limited verification
    VM_ZVerify op;
    VMThread::execute(&op);
  }
}

void ZDriver::concurrent_select_relocation_set() {
  ZStatTimer timer(ZPhaseConcurrentSelectRelocationSet);
  ZHeap::heap()->select_relocation_set();
}

void ZDriver::pause_relocate_start() {
  pause<VM_ZRelocateStart>();
}

void ZDriver::concurrent_relocate() {
  ZStatTimer timer(ZPhaseConcurrentRelocated);
  ZHeap::heap()->relocate();
}

void ZDriver::check_out_of_memory() {
  ZHeap::heap()->check_out_of_memory();
}

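// Soft references are cleared eagerly for causes that indicate memory
// pressure or that explicitly ask for clearing.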
static bool should_clear_soft_references(const ZDriverRequest& request) {
  // Clear soft references if implied by the GC cause
  if (request.cause() == GCCause::_wb_full_gc ||
      request.cause() == GCCause::_metadata_GC_clear_soft_refs ||
      request.cause() == GCCause::_z_allocation_stall) {
    // Clear
    return true;
  }

  // Don't clear
  return false;
}

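// Worker thread selection. With -XX:+UseDynamicNumberOfGCThreads the
// requested number of workers is used as-is; otherwise the number is boosted
// to at least ParallelGCThreads for causes that benefit from completing the
// cycle quickly.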
static uint select_active_worker_threads_dynamic(const ZDriverRequest& request) {
  // Use requested number of worker threads
  return request.nworkers();
}

static uint select_active_worker_threads_static(const ZDriverRequest& request) {
  const GCCause::Cause cause = request.cause();
  const uint nworkers = request.nworkers();

  // Boost number of worker threads if implied by the GC cause
  if (cause == GCCause::_wb_full_gc ||
      cause == GCCause::_java_lang_system_gc ||
      cause == GCCause::_metadata_GC_clear_soft_refs ||
      cause == GCCause::_z_allocation_stall) {
    // Boost
    const uint boosted_nworkers = MAX2(nworkers, ParallelGCThreads);
    return boosted_nworkers;
  }

  // Use requested number of worker threads
  return nworkers;
}

static uint select_active_worker_threads(const ZDriverRequest& request) {
  if (UseDynamicNumberOfGCThreads) {
    return select_active_worker_threads_dynamic(request);
  } else {
    return select_active_worker_threads_static(request);
  }
}

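// Scope object covering one GC cycle. The constructor sets the GC id and
// cause, starts the cycle timer and serviceability tracer, and configures the
// soft reference policy and the number of active workers. The destructor
// updates cycle statistics and the heap capacity/usage data used by the soft
// reference policy.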
class ZDriverGCScope : public StackObj {
private:
  GCIdMark                   _gc_id;
  GCCause::Cause             _gc_cause;
  GCCauseSetter              _gc_cause_setter;
  ZStatTimer                 _timer;
  ZServiceabilityCycleTracer _tracer;

public:
  ZDriverGCScope(const ZDriverRequest& request) :
      _gc_id(),
      _gc_cause(request.cause()),
      _gc_cause_setter(ZCollectedHeap::heap(), _gc_cause),
      _timer(ZPhaseCycle),
      _tracer() {
    // Update statistics
    ZStatCycle::at_start();

    // Set up soft reference policy
    const bool clear = should_clear_soft_references(request);
    ZHeap::heap()->set_soft_reference_policy(clear);

    // Select number of worker threads to use
    const uint nworkers = select_active_worker_threads(request);
    ZHeap::heap()->set_active_workers(nworkers);
  }

  ~ZDriverGCScope() {
    // Update statistics
    ZStatCycle::at_end(_gc_cause, ZHeap::heap()->active_workers());

    // Update data used by soft reference policy
    Universe::heap()->update_capacity_and_used_at_gc();

    // Signal that we have completed a visit to all live objects
    Universe::heap()->record_whole_heap_examined_timestamp();
  }
};

// Macro to execute a termination check after a concurrent phase. Note
// that it's important that the termination check comes after the call
// to the function f, since we can't abort between pause_relocate_start()
// and concurrent_relocate(). We need to let concurrent_relocate() call
// abort_page() on the remaining entries in the relocation set.
#define concurrent(f)                 \
  do {                                \
    concurrent_##f();                 \
    if (should_terminate()) {         \
      return;                         \
    }                                 \
  } while (false)

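// Run one complete GC cycle. Each pause is a brief VM operation; the bulk of
// the work happens in the concurrent phases. The concurrent() macro above
// returns early from this function if the driver has been asked to terminate.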
void ZDriver::gc(const ZDriverRequest& request) {
  ZDriverGCScope scope(request);

  // Phase 1: Pause Mark Start
  pause_mark_start();

  // Phase 2: Concurrent Mark
  concurrent(mark);

  // Phase 3: Pause Mark End
  while (!pause_mark_end()) {
    // Phase 3.5: Concurrent Mark Continue
    concurrent(mark_continue);
  }

  // Phase 4: Concurrent Mark Free
  concurrent(mark_free);

  // Phase 5: Concurrent Process Non-Strong References
  concurrent(process_non_strong_references);

  // Phase 6: Concurrent Reset Relocation Set
  concurrent(reset_relocation_set);

  // Phase 7: Pause Verify
  pause_verify();

  // Phase 8: Concurrent Select Relocation Set
  concurrent(select_relocation_set);

  // Phase 9: Pause Relocate Start
  pause_relocate_start();

  // Phase 10: Concurrent Relocate
  concurrent(relocate);
}

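// Driver thread main loop: wait for a request on the GC cycle port, run one
// GC cycle per request, acknowledge completion and check for out-of-memory.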
void ZDriver::run_service() {
  // Main loop
  while (!should_terminate()) {
    // Wait for GC request
    const ZDriverRequest request = _gc_cycle_port.receive();
    if (request.cause() == GCCause::_no_gc) {
      continue;
    }

    ZBreakpoint::at_before_gc();

    // Run GC
    gc(request);

    if (should_terminate()) {
      // Abort
      break;
    }

    // Notify GC completed
    _gc_cycle_port.ack();

    // Check for out of memory condition
    check_out_of_memory();

    ZBreakpoint::at_after_gc();
  }
}

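// Called during VM shutdown: abort any GC cycle in progress and wake up the
// driver thread with a dummy (_no_gc) request so its main loop can terminate.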
void ZDriver::stop_service() {
  ZAbort::abort();
  _gc_cycle_port.send_async(GCCause::_no_gc);
}
--- EOF ---