src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/memAllocator.hpp"
  30 #include "gc/shared/parallelCleaning.hpp"
  31 #include "gc/shared/plab.hpp"
  32 
  33 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  34 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  35 #include "gc/shenandoah/shenandoahBrooksPointer.hpp"
  36 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  37 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  38 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  39 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahControlThread.hpp"
  41 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  43 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  44 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  45 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  46 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  47 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  48 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  49 #include "gc/shenandoah/shenandoahMetrics.hpp"
  50 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  51 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  52 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  53 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  54 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  55 #include "gc/shenandoah/shenandoahUtils.hpp"
  56 #include "gc/shenandoah/shenandoahVerifier.hpp"
  57 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  58 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  59 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  60 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  61 #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
  62 #include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
  63 #include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
  64 #include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
  65 #include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
  66 #include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"
  67 
  68 #include "memory/metaspace.hpp"
  69 #include "runtime/interfaceSupport.inline.hpp"
  70 #include "runtime/safepointMechanism.hpp"
  71 #include "runtime/vmThread.hpp"
  72 #include "services/mallocTracker.hpp"
  73 
  74 #ifdef ASSERT
  75 template <class T>
  76 void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
  77   T o = RawAccess<>::oop_load(p);
  78   if (! CompressedOops::is_null(o)) {
  79     oop obj = CompressedOops::decode_not_null(o);
  80     shenandoah_assert_not_forwarded(p, obj);
  81   }
  82 }
  83 
  84 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
  85 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_work(p); }
  86 #endif
  87 
  88 class ShenandoahPretouchHeapTask : public AbstractGangTask {
  89 private:
  90   ShenandoahRegionIterator _regions;
  91   const size_t _page_size;
  92 public:
  93   ShenandoahPretouchHeapTask(size_t page_size) :


 123       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 124       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 125 
 126       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 127 
 128       r = _regions.next();
 129     }
 130   }
 131 };
 132 
 133 jint ShenandoahHeap::initialize() {
 134   ShenandoahBrooksPointer::initial_checks();
 135 
 136   initialize_heuristics();
 137 
 138   //
 139   // Figure out heap sizing
 140   //
 141 
 142   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 143   size_t min_byte_size  = collector_policy()->min_heap_byte_size();
 144   size_t max_byte_size  = collector_policy()->max_heap_byte_size();
 145   size_t heap_alignment = collector_policy()->heap_alignment();
 146 
 147   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 148 
 149   if (ShenandoahAlwaysPreTouch) {
 150     // Enabled pre-touch means the entire heap is committed right away.
 151     init_byte_size = max_byte_size;
 152   }
 153 
 154   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 155   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 156 
 157   _num_regions = ShenandoahHeapRegion::region_count();
 158 
 159   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 160   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 161   assert(num_committed_regions <= _num_regions, "sanity");
 162   _initial_size = num_committed_regions * reg_size_bytes;
 163 
 164   size_t num_min_regions = min_byte_size / reg_size_bytes;
 165   num_min_regions = MIN2(num_min_regions, _num_regions);
 166   assert(num_min_regions <= _num_regions, "sanity");
 167   _minimum_size = num_min_regions * reg_size_bytes;
 168 

 169   _committed = _initial_size;
 170 
 171   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 172   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 173 
 174   //
 175   // Reserve and commit memory for heap
 176   //
 177 
 178   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 179   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 180   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 181   _heap_region_special = heap_rs.special();
 182 
 183   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 184          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 185 
 186   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 187   if (!_heap_region_special) {
 188     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,


 339   ShenandoahStringDedup::initialize();
 340   ShenandoahCodeRoots::initialize();
 341 
 342   if (ShenandoahAllocationTrace) {
 343     _alloc_tracker = new ShenandoahAllocTracker();
 344   }
 345 
 346   if (ShenandoahPacing) {
 347     _pacer = new ShenandoahPacer(this);
 348     _pacer->setup_for_idle();
 349   } else {
 350     _pacer = NULL;
 351   }
 352 
 353   _traversal_gc = heuristics()->can_do_traversal_gc() ?
 354                   new ShenandoahTraversalGC(this, _num_regions) :
 355                   NULL;
 356 
 357   _control_thread = new ShenandoahControlThread();
 358 
 359   log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
 360                      byte_size_in_proper_unit(_initial_size),  proper_unit_for_byte_size(_initial_size),
 361                      byte_size_in_proper_unit(_minimum_size),  proper_unit_for_byte_size(_minimum_size),
 362                      byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
 363   );
 364 
 365   log_info(gc, init)("Safepointing mechanism: %s",
 366                      SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
 367                      (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
 368 
 369   return JNI_OK;
 370 }
 371 
 372 void ShenandoahHeap::initialize_heuristics() {
 373   if (ShenandoahGCHeuristics != NULL) {
 374     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
 375       _heuristics = new ShenandoahAggressiveHeuristics();
 376     } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
 377       _heuristics = new ShenandoahStaticHeuristics();
 378     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 379       _heuristics = new ShenandoahAdaptiveHeuristics();
 380     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 381       _heuristics = new ShenandoahPassiveHeuristics();
 382     } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
 383       _heuristics = new ShenandoahCompactHeuristics();


 441   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 442   _soft_ref_policy(),
 443   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 444   _ref_processor(NULL),
 445   _marking_context(NULL),
 446   _bitmap_size(0),
 447   _bitmap_regions_per_slice(0),
 448   _bitmap_bytes_per_slice(0),
 449   _bitmap_region_special(false),
 450   _aux_bitmap_region_special(false),
 451   _liveness_cache(NULL),
 452   _collection_set(NULL)
 453 {
 454   log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 455   log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
 456 
 457   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 458 
 459   _max_workers = MAX2(_max_workers, 1U);
 460   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 461                             /* are_GC_task_threads */ true,
 462                             /* are_ConcurrentGC_threads */ true);
 463   if (_workers == NULL) {
 464     vm_exit_during_initialization("Failed necessary allocation.");
 465   } else {
 466     _workers->initialize_workers();
 467   }
 468 
 469   if (ShenandoahParallelSafepointThreads > 1) {
 470     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 471                                                 ShenandoahParallelSafepointThreads,
 472                       /* are_GC_task_threads */ false,
 473                  /* are_ConcurrentGC_threads */ false);
 474     _safepoint_workers->initialize_workers();
 475   }
 476 }
 477 
 478 #ifdef _MSC_VER
 479 #pragma warning( pop )
 480 #endif
 481 
 482 class ShenandoahResetBitmapTask : public AbstractGangTask {
 483 private:
 484   ShenandoahRegionIterator _regions;
 485 
 486 public:
 487   ShenandoahResetBitmapTask() :
 488     AbstractGangTask("Parallel Reset Bitmap Task") {}
 489 
 490   void work(uint worker_id) {
 491     ShenandoahHeapRegion* region = _regions.next();
 492     ShenandoahHeap* heap = ShenandoahHeap::heap();
 493     ShenandoahMarkingContext* const ctx = heap->marking_context();
 494     while (region != NULL) {
 495       if (heap->is_bitmap_slice_committed(region)) {
 496         ctx->clear_bitmap(region);
 497       }
 498       region = _regions.next();
 499     }
 500   }
 501 };
 502 
 503 void ShenandoahHeap::reset_mark_bitmap() {
 504   assert_gc_workers(_workers->active_workers());
 505   mark_incomplete_marking_context();
 506 
 507   ShenandoahResetBitmapTask task;
 508   _workers->run_task(&task);
 509 }
 510 
 511 void ShenandoahHeap::print_on(outputStream* st) const {
 512   st->print_cr("Shenandoah Heap");
 513   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 514                max_capacity() / K, committed() / K, used() / K);
 515   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
 516                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 517 
 518   st->print("Status: ");
 519   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 520   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 521   if (is_evacuation_in_progress())           st->print("evacuating, ");
 522   if (is_update_refs_in_progress())          st->print("updating refs, ");
 523   if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
 524   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 525   if (is_full_gc_in_progress())              st->print("full gc, ");
 526   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 527 
 528   if (cancelled_gc()) {
 529     st->print("cancelled");
 530   } else {
 531     st->print("not cancelled");
 532   }
 533   st->cr();
 534 


 607 
 608 void ShenandoahHeap::increase_allocated(size_t bytes) {
 609   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 610 }
 611 
 612 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 613   size_t bytes = words * HeapWordSize;
 614   if (!waste) {
 615     increase_used(bytes);
 616   }
 617   increase_allocated(bytes);
 618   if (ShenandoahPacing) {
 619     control_thread()->pacing_notify_alloc(words);
 620     if (waste) {
 621       pacer()->claim_for_alloc(words, true);
 622     }
 623   }
 624 }
 625 
 626 size_t ShenandoahHeap::capacity() const {
 627   return committed();
 628 }
 629 
 630 size_t ShenandoahHeap::max_capacity() const {
 631   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 632 }
 633 
 634 size_t ShenandoahHeap::min_capacity() const {
 635   return _minimum_size;
 636 }
 637 
 638 size_t ShenandoahHeap::initial_capacity() const {
 639   return _initial_size;
 640 }
 641 
 642 bool ShenandoahHeap::is_in(const void* p) const {
 643   HeapWord* heap_base = (HeapWord*) base();
 644   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 645   return p >= heap_base && p < last_region_end;
 646 }
 647 
 648 void ShenandoahHeap::op_uncommit(double shrink_before) {
 649   assert (ShenandoahUncommit, "should be enabled");
 650 
 651   // The application allocates from the beginning of the heap, and GC allocates at
 652   // the end of it. It is more efficient to uncommit from the end, so that applications
 653   // can enjoy the nearby committed regions. GC allocations are much less frequent,
 654   // and can therefore accept the committing costs.
 655 
 656   size_t count = 0;
 657   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 658     ShenandoahHeapRegion* r = get_region(i - 1);
 659     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 660       ShenandoahHeapLocker locker(lock());
 661       if (r->is_empty_committed()) {
 662         // Do not uncommit below minimal capacity
 663         if (committed() < min_capacity() + ShenandoahHeapRegion::region_size_bytes()) {
 664           break;
 665         }
 666 
 667         r->make_uncommitted();
 668         count++;
 669       }
 670     }
 671     SpinPause(); // allow allocators to take the lock
 672   }
 673 
 674   if (count > 0) {
 675     control_thread()->notify_heap_changed();
 676   }
 677 }
 678 
 679 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 680   // New object should fit the GCLAB size
 681   size_t min_size = MAX2(size, PLAB::min_size());
 682 
 683   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 684   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 685   new_size = MIN2(new_size, PLAB::max_size());
 686   new_size = MAX2(new_size, PLAB::min_size());
 687 
 688   // Record the new heuristic value even if we take a shortcut. This captures
 689   // the case where moderately-sized objects always take the shortcut. At some
 690   // point, the heuristics should catch up with them.
 691   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 692 
 693   if (new_size < size) {
 694     // New size still does not fit the object. Fall back to shared allocation.


 922 
 923   // Expand and retry allocation
 924   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 925   if (result != NULL) {
 926     return result;
 927   }
 928 
 929   // Out of memory
 930   return NULL;
 931 }
 932 
 933 void ShenandoahHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
 934   HeapWord* obj = tlab_post_allocation_setup(start);
 935   CollectedHeap::fill_with_object(obj, end);
 936 }
 937 
 938 size_t ShenandoahHeap::min_dummy_object_size() const {
 939   return CollectedHeap::min_dummy_object_size() + ShenandoahBrooksPointer::word_size();
 940 }
 941 
 942 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 943 private:
 944   ShenandoahHeap* const _heap;
 945   Thread* const _thread;
 946 public:
 947   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 948     _heap(heap), _thread(Thread::current()) {}
 949 
 950   void do_object(oop p) {
 951     shenandoah_assert_marked(NULL, p);
 952     if (oopDesc::equals_raw(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
 953       _heap->evacuate_object(p, _thread);
 954     }
 955   }
 956 };
 957 
 958 class ShenandoahEvacuationTask : public AbstractGangTask {
 959 private:
 960   ShenandoahHeap* const _sh;
 961   ShenandoahCollectionSet* const _cs;


1171   double v = heuristics()->time_since_last_gc() * 1000;
1172   assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
1173   return (jlong)v;
1174 }
1175 
1176 void ShenandoahHeap::prepare_for_verify() {
1177   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1178     make_parsable(false);
1179   }
1180 }
1181 
1182 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1183   workers()->print_worker_threads_on(st);
1184   if (ShenandoahStringDedup::is_enabled()) {
1185     ShenandoahStringDedup::print_worker_threads_on(st);
1186   }
1187 }
1188 
1189 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1190   workers()->threads_do(tcl);
1191   if (_safepoint_workers != NULL) {
1192     _safepoint_workers->threads_do(tcl);
1193   }
1194   if (ShenandoahStringDedup::is_enabled()) {
1195     ShenandoahStringDedup::threads_do(tcl);
1196   }
1197 }
1198 
1199 void ShenandoahHeap::print_tracing_info() const {
1200   LogTarget(Info, gc, stats) lt;
1201   if (lt.is_enabled()) {
1202     ResourceMark rm;
1203     LogStream ls(lt);
1204 
1205     phase_timings()->print_on(&ls);
1206 
1207     ls.cr();
1208     ls.cr();
1209 
1210     shenandoah_policy()->print_gc_stats(&ls);
1211 
1212     ls.cr();
1213     ls.cr();


1514     }
1515 
1516     // If collection set has candidates, start evacuation.
1517     // Otherwise, bypass the rest of the cycle.
1518     if (!collection_set()->is_empty()) {
1519       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1520 
1521       if (ShenandoahVerify) {
1522         verifier()->verify_before_evacuation();
1523       }
1524 
1525       set_evacuation_in_progress(true);
1526       // From here on, we need to update references.
1527       set_has_forwarded_objects(true);
1528 
1529       evacuate_and_update_roots();
1530 
1531       if (ShenandoahPacing) {
1532         pacer()->setup_for_evac();
1533       }
1534 
1535       if (ShenandoahVerify) {
1536         verifier()->verify_during_evacuation();
1537       }
1538     } else {
1539       if (ShenandoahVerify) {
1540         verifier()->verify_after_concmark();
1541       }
1542 
1543       if (VerifyAfterGC) {
1544         Universe::verify();
1545       }
1546     }
1547 
1548   } else {
1549     concurrent_mark()->cancel();
1550     stop_concurrent_marking();
1551 
1552     if (process_references()) {
1553       // Abandon reference processing right away: pre-cleaning must have failed.
1554       ReferenceProcessor *rp = ref_processor();
1555       rp->disable_discovery();
1556       rp->abandon_partial_discovery();
1557       rp->verify_no_references_recorded();


1829   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1830 }
1831 
1832 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1833    set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress);
1834    ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1835 }
1836 
1837 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1838   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1839   set_gc_state_mask(EVACUATION, in_progress);
1840 }
1841 
1842 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1843   // Initialize Brooks pointer for the next object
1844   HeapWord* result = obj + ShenandoahBrooksPointer::word_size();
1845   ShenandoahBrooksPointer::initialize(oop(result));
1846   return result;
1847 }
1848 
1849 void ShenandoahHeap::ref_processing_init() {
1850   assert(_max_workers > 0, "Sanity");
1851 
1852   _ref_processor =
1853     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
1854                            ParallelRefProcEnabled,  // MT processing
1855                            _max_workers,            // Degree of MT processing
1856                            true,                    // MT discovery
1857                            _max_workers,            // Degree of MT discovery
1858                            false,                   // Reference discovery is not atomic
1859                            NULL,                    // No closure, should be installed before use
1860                            true);                   // Scale worker threads
1861 
1862   shenandoah_assert_rp_isalive_not_installed();
1863 }
1864 
1865 GCTracer* ShenandoahHeap::tracer() {
1866   return shenandoah_policy()->tracer();
1867 }
1868 


2798 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2799   assert(worker_id < _max_workers, "sanity");
2800   assert(_liveness_cache != NULL, "sanity");
2801   jushort* ld = _liveness_cache[worker_id];
2802   for (uint i = 0; i < num_regions(); i++) {
2803     ShenandoahHeapRegion* r = get_region(i);
2804     jushort live = ld[i];
2805     if (live > 0) {
2806       r->increase_live_data_gc_words(live);
2807       ld[i] = 0;
2808     }
2809   }
2810 }
2811 
2812 size_t ShenandoahHeap::obj_size(oop obj) const {
2813   return CollectedHeap::obj_size(obj) + ShenandoahBrooksPointer::word_size();
2814 }
2815 
2816 ptrdiff_t ShenandoahHeap::cell_header_size() const {
2817   return ShenandoahBrooksPointer::byte_size();
2818 }

src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp (previous version of the file, shown for comparison)

  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/memAllocator.hpp"
  30 #include "gc/shared/parallelCleaning.hpp"
  31 #include "gc/shared/plab.hpp"
  32 
  33 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  34 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  35 #include "gc/shenandoah/shenandoahBrooksPointer.hpp"
  36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  37 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  38 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  39 #include "gc/shenandoah/shenandoahControlThread.hpp"
  40 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  41 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  42 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  44 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  45 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  46 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  47 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  48 #include "gc/shenandoah/shenandoahMetrics.hpp"
  49 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  50 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  51 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  52 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  53 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  54 #include "gc/shenandoah/shenandoahUtils.hpp"
  55 #include "gc/shenandoah/shenandoahVerifier.hpp"
  56 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  57 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  58 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  59 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  60 #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
  61 #include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
  62 #include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
  63 #include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
  64 #include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
  65 #include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"
  66 
  67 #include "memory/metaspace.hpp"
  68 #include "runtime/interfaceSupport.inline.hpp"
  69 #include "runtime/safepointMechanism.hpp"
  70 #include "runtime/vmThread.hpp"
  71 #include "services/mallocTracker.hpp"
  72 
  73 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  74 
  75 #ifdef ASSERT
  76 template <class T>
  77 void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
  78   T o = RawAccess<>::oop_load(p);
  79   if (! CompressedOops::is_null(o)) {
  80     oop obj = CompressedOops::decode_not_null(o);
  81     shenandoah_assert_not_forwarded(p, obj);
  82   }
  83 }
  84 
  85 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
  86 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_work(p); }
  87 #endif
  88 
  89 class ShenandoahPretouchHeapTask : public AbstractGangTask {
  90 private:
  91   ShenandoahRegionIterator _regions;
  92   const size_t _page_size;
  93 public:
  94   ShenandoahPretouchHeapTask(size_t page_size) :


 124       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 125       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 126 
 127       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 128 
 129       r = _regions.next();
 130     }
 131   }
 132 };
 133 
 134 jint ShenandoahHeap::initialize() {
 135   ShenandoahBrooksPointer::initial_checks();
 136 
 137   initialize_heuristics();
 138 
 139   //
 140   // Figure out heap sizing
 141   //
 142 
 143   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 144   size_t max_byte_size  = collector_policy()->max_heap_byte_size();
 145   size_t heap_alignment = collector_policy()->heap_alignment();
 146 
 147   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 148 
 149   if (ShenandoahAlwaysPreTouch) {
 150     // Enabled pre-touch means the entire heap is committed right away.
 151     init_byte_size = max_byte_size;
 152   }
 153 
 154   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 155   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 156 
 157   _num_regions = ShenandoahHeapRegion::region_count();
 158 
 159   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 160   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 161   assert(num_committed_regions <= _num_regions, "sanity");
 162 
 163   _initial_size = num_committed_regions * reg_size_bytes;
 164   _committed = _initial_size;
 165 
 166   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 167   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 168 
 169   //
 170   // Reserve and commit memory for heap
 171   //
 172 
 173   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 174   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 175   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 176   _heap_region_special = heap_rs.special();
 177 
 178   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 179          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 180 
 181   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 182   if (!_heap_region_special) {
 183     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,


 334   ShenandoahStringDedup::initialize();
 335   ShenandoahCodeRoots::initialize();
 336 
 337   if (ShenandoahAllocationTrace) {
 338     _alloc_tracker = new ShenandoahAllocTracker();
 339   }
 340 
 341   if (ShenandoahPacing) {
 342     _pacer = new ShenandoahPacer(this);
 343     _pacer->setup_for_idle();
 344   } else {
 345     _pacer = NULL;
 346   }
 347 
 348   _traversal_gc = heuristics()->can_do_traversal_gc() ?
 349                   new ShenandoahTraversalGC(this, _num_regions) :
 350                   NULL;
 351 
 352   _control_thread = new ShenandoahControlThread();
 353 
 354   log_info(gc, init)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s",
 355                      byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size));
 356 
 357   log_info(gc, init)("Safepointing mechanism: %s",
 358                      SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
 359                      (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
 360 
 361   return JNI_OK;
 362 }
 363 
 364 void ShenandoahHeap::initialize_heuristics() {
 365   if (ShenandoahGCHeuristics != NULL) {
 366     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
 367       _heuristics = new ShenandoahAggressiveHeuristics();
 368     } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
 369       _heuristics = new ShenandoahStaticHeuristics();
 370     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 371       _heuristics = new ShenandoahAdaptiveHeuristics();
 372     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 373       _heuristics = new ShenandoahPassiveHeuristics();
 374     } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
 375       _heuristics = new ShenandoahCompactHeuristics();


 433   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 434   _soft_ref_policy(),
 435   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 436   _ref_processor(NULL),
 437   _marking_context(NULL),
 438   _bitmap_size(0),
 439   _bitmap_regions_per_slice(0),
 440   _bitmap_bytes_per_slice(0),
 441   _bitmap_region_special(false),
 442   _aux_bitmap_region_special(false),
 443   _liveness_cache(NULL),
 444   _collection_set(NULL)
 445 {
 446   log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 447   log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
 448 
 449   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 450 
 451   _max_workers = MAX2(_max_workers, 1U);
 452   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 453                             /* are_GC_task_threads */true,
 454                             /* are_ConcurrentGC_threads */false);
 455   if (_workers == NULL) {
 456     vm_exit_during_initialization("Failed necessary allocation.");
 457   } else {
 458     _workers->initialize_workers();
 459   }
 460 
 461   if (ShenandoahParallelSafepointThreads > 1) {
 462     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 463                                                 ShenandoahParallelSafepointThreads,
 464                                                 false, false);
 465     _safepoint_workers->initialize_workers();
 466   }
 467 }
 468 
 469 #ifdef _MSC_VER
 470 #pragma warning( pop )
 471 #endif
 472 
 473 class ShenandoahResetBitmapTask : public AbstractGangTask {
 474 private:
 475   ShenandoahRegionIterator _regions;
 476 
 477 public:
 478   ShenandoahResetBitmapTask() :
 479     AbstractGangTask("Parallel Reset Bitmap Task") {}
 480 
 481   void work(uint worker_id) {
 482     ShenandoahHeapRegion* region = _regions.next();
 483     ShenandoahHeap* heap = ShenandoahHeap::heap();
 484     ShenandoahMarkingContext* const ctx = heap->marking_context();
 485     while (region != NULL) {
 486       if (heap->is_bitmap_slice_committed(region)) {
 487         ctx->clear_bitmap(region);
 488       }
 489       region = _regions.next();
 490     }
 491   }
 492 };
 493 
 494 void ShenandoahHeap::reset_mark_bitmap() {
 495   assert_gc_workers(_workers->active_workers());
 496   mark_incomplete_marking_context();
 497 
 498   ShenandoahResetBitmapTask task;
 499   _workers->run_task(&task);
 500 }
 501 
 502 void ShenandoahHeap::print_on(outputStream* st) const {
 503   st->print_cr("Shenandoah Heap");
 504   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 505                capacity() / K, committed() / K, used() / K);
 506   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
 507                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 508 
 509   st->print("Status: ");
 510   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 511   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 512   if (is_evacuation_in_progress())           st->print("evacuating, ");
 513   if (is_update_refs_in_progress())          st->print("updating refs, ");
 514   if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
 515   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 516   if (is_full_gc_in_progress())              st->print("full gc, ");
 517   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 518 
 519   if (cancelled_gc()) {
 520     st->print("cancelled");
 521   } else {
 522     st->print("not cancelled");
 523   }
 524   st->cr();
 525 


 598 
 599 void ShenandoahHeap::increase_allocated(size_t bytes) {
 600   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 601 }
 602 
 603 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 604   size_t bytes = words * HeapWordSize;
 605   if (!waste) {
 606     increase_used(bytes);
 607   }
 608   increase_allocated(bytes);
 609   if (ShenandoahPacing) {
 610     control_thread()->pacing_notify_alloc(words);
 611     if (waste) {
 612       pacer()->claim_for_alloc(words, true);
 613     }
 614   }
 615 }
 616 
 617 size_t ShenandoahHeap::capacity() const {
 618   return num_regions() * ShenandoahHeapRegion::region_size_bytes();
 619 }
 620 
 621 size_t ShenandoahHeap::max_capacity() const {
 622   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 623 }
 624 
 625 size_t ShenandoahHeap::initial_capacity() const {
 626   return _initial_size;
 627 }
 628 
 629 bool ShenandoahHeap::is_in(const void* p) const {
 630   HeapWord* heap_base = (HeapWord*) base();
 631   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 632   return p >= heap_base && p < last_region_end;
 633 }
 634 
 635 void ShenandoahHeap::op_uncommit(double shrink_before) {
 636   assert (ShenandoahUncommit, "should be enabled");
 637 
 638   size_t count = 0;
 639   for (size_t i = 0; i < num_regions(); i++) {
 640     ShenandoahHeapRegion* r = get_region(i);
 641     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 642       ShenandoahHeapLocker locker(lock());
 643       if (r->is_empty_committed()) {
 644         r->make_uncommitted();
 645         count++;
 646       }
 647     }
 648     SpinPause(); // allow allocators to take the lock
 649   }
 650 
 651   if (count > 0) {
 652     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 653                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 654     control_thread()->notify_heap_changed();
 655   }
 656 }
 657 
 658 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 659   // New object should fit the GCLAB size
 660   size_t min_size = MAX2(size, PLAB::min_size());
 661 
 662   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 663   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 664   new_size = MIN2(new_size, PLAB::max_size());
 665   new_size = MAX2(new_size, PLAB::min_size());
 666 
 667   // Record the new heuristic value even if we take a shortcut. This captures
 668   // the case where moderately-sized objects always take the shortcut. At some
 669   // point, the heuristics should catch up with them.
 670   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 671 
 672   if (new_size < size) {
 673     // New size still does not fit the object. Fall back to shared allocation.


 901 
 902   // Expand and retry allocation
 903   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 904   if (result != NULL) {
 905     return result;
 906   }
 907 
 908   // Out of memory
 909   return NULL;
 910 }
 911 
 912 void ShenandoahHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
 913   HeapWord* obj = tlab_post_allocation_setup(start);
 914   CollectedHeap::fill_with_object(obj, end);
 915 }
 916 
 917 size_t ShenandoahHeap::min_dummy_object_size() const {
 918   return CollectedHeap::min_dummy_object_size() + ShenandoahBrooksPointer::word_size();
 919 }
 920 
 921 class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure {
 922 private:
 923   ShenandoahHeap* _heap;
 924   Thread* _thread;
 925 public:
 926   ShenandoahEvacuateUpdateRootsClosure() :
 927     _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 928   }
 929 
 930 private:
 931   template <class T>
 932   void do_oop_work(T* p) {
 933     assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
 934 
 935     T o = RawAccess<>::oop_load(p);
 936     if (! CompressedOops::is_null(o)) {
 937       oop obj = CompressedOops::decode_not_null(o);
 938       if (_heap->in_collection_set(obj)) {
 939         shenandoah_assert_marked(p, obj);
 940         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 941         if (oopDesc::equals_raw(resolved, obj)) {
 942           resolved = _heap->evacuate_object(obj, _thread);
 943         }
 944         RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
 945       }
 946     }
 947   }
 948 
 949 public:
 950   void do_oop(oop* p) {
 951     do_oop_work(p);
 952   }
 953   void do_oop(narrowOop* p) {
 954     do_oop_work(p);
 955   }
 956 };
 957 
 958 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 959 private:
 960   ShenandoahHeap* const _heap;
 961   Thread* const _thread;
 962 public:
 963   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 964     _heap(heap), _thread(Thread::current()) {}
 965 
 966   void do_object(oop p) {
 967     shenandoah_assert_marked(NULL, p);
 968     if (oopDesc::equals_raw(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
 969       _heap->evacuate_object(p, _thread);
 970     }
 971   }
 972 };
 973 
 974 class ShenandoahEvacuationTask : public AbstractGangTask {
 975 private:
 976   ShenandoahHeap* const _sh;
 977   ShenandoahCollectionSet* const _cs;


1187   double v = heuristics()->time_since_last_gc() * 1000;
1188   assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
1189   return (jlong)v;
1190 }
1191 
1192 void ShenandoahHeap::prepare_for_verify() {
1193   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1194     make_parsable(false);
1195   }
1196 }
1197 
1198 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1199   workers()->print_worker_threads_on(st);
1200   if (ShenandoahStringDedup::is_enabled()) {
1201     ShenandoahStringDedup::print_worker_threads_on(st);
1202   }
1203 }
1204 
1205 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1206   workers()->threads_do(tcl);
1207   _safepoint_workers->threads_do(tcl);
1208   if (ShenandoahStringDedup::is_enabled()) {
1209     ShenandoahStringDedup::threads_do(tcl);
1210   }
1211 }
1212 
1213 void ShenandoahHeap::print_tracing_info() const {
1214   LogTarget(Info, gc, stats) lt;
1215   if (lt.is_enabled()) {
1216     ResourceMark rm;
1217     LogStream ls(lt);
1218 
1219     phase_timings()->print_on(&ls);
1220 
1221     ls.cr();
1222     ls.cr();
1223 
1224     shenandoah_policy()->print_gc_stats(&ls);
1225 
1226     ls.cr();
1227     ls.cr();


1528     }
1529 
1530     // If collection set has candidates, start evacuation.
1531     // Otherwise, bypass the rest of the cycle.
1532     if (!collection_set()->is_empty()) {
1533       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1534 
1535       if (ShenandoahVerify) {
1536         verifier()->verify_before_evacuation();
1537       }
1538 
1539       set_evacuation_in_progress(true);
1540       // From here on, we need to update references.
1541       set_has_forwarded_objects(true);
1542 
1543       evacuate_and_update_roots();
1544 
1545       if (ShenandoahPacing) {
1546         pacer()->setup_for_evac();
1547       }
1548     } else {
1549       if (ShenandoahVerify) {
1550         verifier()->verify_after_concmark();
1551       }
1552 
1553       if (VerifyAfterGC) {
1554         Universe::verify();
1555       }
1556     }
1557 
1558   } else {
1559     concurrent_mark()->cancel();
1560     stop_concurrent_marking();
1561 
1562     if (process_references()) {
1563       // Abandon reference processing right away: pre-cleaning must have failed.
1564       ReferenceProcessor *rp = ref_processor();
1565       rp->disable_discovery();
1566       rp->abandon_partial_discovery();
1567       rp->verify_no_references_recorded();


1839   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1840 }
1841 
1842 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1843    set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress);
1844    ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1845 }
1846 
1847 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1848   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1849   set_gc_state_mask(EVACUATION, in_progress);
1850 }
1851 
1852 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1853   // Initialize Brooks pointer for the next object
1854   HeapWord* result = obj + ShenandoahBrooksPointer::word_size();
1855   ShenandoahBrooksPointer::initialize(oop(result));
1856   return result;
1857 }
1858 
1859 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1860   _mark_context(ShenandoahHeap::heap()->marking_context()) {
1861 }
1862 
1863 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1864   _mark_context(ShenandoahHeap::heap()->marking_context()) {
1865 }
1866 
1867 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1868   if (CompressedOops::is_null(obj)) {
1869     return false;
1870   }
1871   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1872   shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress() || ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
1873   return _mark_context->is_marked(obj);
1874 }
1875 
1876 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1877   if (CompressedOops::is_null(obj)) {
1878     return false;
1879   }
1880   shenandoah_assert_not_forwarded(NULL, obj);
1881   return _mark_context->is_marked(obj);
1882 }
1883 
1884 void ShenandoahHeap::ref_processing_init() {
1885   assert(_max_workers > 0, "Sanity");
1886 
1887   _ref_processor =
1888     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
1889                            ParallelRefProcEnabled,  // MT processing
1890                            _max_workers,            // Degree of MT processing
1891                            true,                    // MT discovery
1892                            _max_workers,            // Degree of MT discovery
1893                            false,                   // Reference discovery is not atomic
1894                            NULL,                    // No closure, should be installed before use
1895                            true);                   // Scale worker threads
1896 
1897   shenandoah_assert_rp_isalive_not_installed();
1898 }
1899 
1900 GCTracer* ShenandoahHeap::tracer() {
1901   return shenandoah_policy()->tracer();
1902 }
1903 


2833 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2834   assert(worker_id < _max_workers, "sanity");
2835   assert(_liveness_cache != NULL, "sanity");
2836   jushort* ld = _liveness_cache[worker_id];
2837   for (uint i = 0; i < num_regions(); i++) {
2838     ShenandoahHeapRegion* r = get_region(i);
2839     jushort live = ld[i];
2840     if (live > 0) {
2841       r->increase_live_data_gc_words(live);
2842       ld[i] = 0;
2843     }
2844   }
2845 }
2846 
2847 size_t ShenandoahHeap::obj_size(oop obj) const {
2848   return CollectedHeap::obj_size(obj) + ShenandoahBrooksPointer::word_size();
2849 }
2850 
2851 ptrdiff_t ShenandoahHeap::cell_header_size() const {
2852   return ShenandoahBrooksPointer::byte_size();
2853 }
2854 
2855 BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
2856   return ShenandoahHeap::heap()->has_forwarded_objects() ? reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl)
2857                                                          : reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
2858 }