src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

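// Degenerated GC: a concurrent cycle that could not complete concurrently is
// finished under a single stop-the-world pause, resuming from the given
// degeneration point rather than starting over.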
void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert(point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

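// Uncommit committed-but-empty regions whose empty_time() predates the
// shrink_before deadline supplied by the caller.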
void ShenandoahControlThread::service_uncommit(double shrink_before) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking the heap lock when
  // there is no work available, avoids spamming logs with superfluous
  // messages, and minimises the amount of work done while the lock is held.

  if (heap->committed() <= heap->min_capacity()) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

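// Hand the request over to the control thread and block the caller until the
// cycle completes: _gc_requested stays set until notify_gc_waiters() below
// unsets it and wakes everyone parked on _gc_waiters_lock.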
void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  _requested_gc_cause = cause;
  _gc_requested.set();
  MonitorLocker ml(&_gc_waiters_lock);
  while (_gc_requested.is_set()) {
    ml.wait();
  }
}

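// Allocation slow path for Java threads: the first thread to observe the
// failure cancels the GC in progress; every failing thread then blocks here
// until notify_alloc_failure_waiters() releases it.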
void ShenandoahControlThread::handle_alloc_failure(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

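// Evacuation-time variant: unlike handle_alloc_failure(), this does not wait
// for the recovery cycle; it only records the failure and cancels the GC.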
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

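// Counter updates are requested asynchronously through these flags; each flag
// is test-and-cleared so a pending request is serviced once.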
void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.