src/hotspot/share/gc/z/zHeap.cpp

  62 ZHeap* ZHeap::_heap = NULL;
  63 
  64 ZHeap::ZHeap() :
  65     _workers(),
  66     _object_allocator(_workers.nworkers()),
  67     _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
  68     _page_table(),
  69     _forwarding_table(),
  70     _mark(&_workers, &_page_table),
  71     _reference_processor(&_workers),
  72     _weak_roots_processor(&_workers),
  73     _relocate(&_workers),
  74     _relocation_set(),
  75     _unload(&_workers),
  76     _serviceability(heap_min_size(), heap_max_size()) {
  77   // Install global heap instance
  78   assert(_heap == NULL, "Already initialized");
  79   _heap = this;
  80 
  81   // Update statistics
  82   ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
  83 }
  84 
  85 size_t ZHeap::heap_min_size() const {
  86   return MinHeapSize;
  87 }
  88 
  89 size_t ZHeap::heap_initial_size() const {
  90   return InitialHeapSize;
  91 }
  92 
  93 size_t ZHeap::heap_max_size() const {
  94   return MaxHeapSize;
  95 }
  96 
  97 size_t ZHeap::heap_max_reserve_size() const {
  98   // Reserve one small page per worker plus one shared medium page. This is still just
  99   // an estimate and doesn't guarantee that we can't run out of memory during relocation.
 100   const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
 101   return MIN2(max_reserve_size, heap_max_size());
 102 }
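
Illustrative aside, not part of zHeap.cpp: the reserve computed above is "one small page per worker plus one medium page", capped at the maximum heap size. With assumed defaults (ZGC commonly uses 2M small pages and 32M medium pages), 4 workers would give 4 * 2M + 32M = 40M. A minimal standalone sketch of the same arithmetic:

#include <algorithm>
#include <cstddef>

// Sketch only; page sizes are assumptions, not values read from this file.
static size_t example_max_reserve(size_t nworkers, size_t max_heap) {
  const size_t small_page  = 2  * 1024 * 1024;  // assumed ZPageSizeSmall
  const size_t medium_page = 32 * 1024 * 1024;  // assumed ZPageSizeMedium
  const size_t reserve = (nworkers * small_page) + medium_page;
  return std::min(reserve, max_heap);           // mirrors MIN2(reserve, heap_max_size())
}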
 103 
 104 bool ZHeap::is_initialized() const {
 105   return _page_allocator.is_initialized() && _mark.is_initialized();
 106 }
 107 
 108 size_t ZHeap::min_capacity() const {
 109   return _page_allocator.min_capacity();
 110 }
 111 
 112 size_t ZHeap::max_capacity() const {
 113   return _page_allocator.max_capacity();
 114 }
 115 
 116 size_t ZHeap::soft_max_capacity() const {
 117   return _page_allocator.soft_max_capacity();
 118 }
 119 
 120 size_t ZHeap::capacity() const {
 121   return _page_allocator.capacity();
 122 }
 123 
 124 size_t ZHeap::max_reserve() const {
 125   return _page_allocator.max_reserve();
 126 }
 127 
 128 size_t ZHeap::used_high() const {
 129   return _page_allocator.used_high();
 130 }
 131 
 132 size_t ZHeap::used_low() const {
 133   return _page_allocator.used_low();
 134 }
 135 
 136 size_t ZHeap::used() const {
 137   return _page_allocator.used();


 160 size_t ZHeap::max_tlab_size() const {
 161   return ZObjectSizeLimitSmall;
 162 }
 163 
 164 size_t ZHeap::unsafe_max_tlab_alloc() const {
 165   size_t size = _object_allocator.remaining();
 166 
 167   if (size < MinTLABSize) {
 168     // The remaining space in the allocator is not enough to
 169     // fit the smallest possible TLAB. This means that the next
 170     // TLAB allocation will force the allocator to get a new
 171     // backing page anyway, which in turn means that we can then
 172     // fit the largest possible TLAB.
 173     size = max_tlab_size();
 174   }
 175 
 176   return MIN2(size, max_tlab_size());
 177 }
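
Illustrative aside, not part of zHeap.cpp: the policy above is "report the space left in the current allocation page, unless that leftover cannot hold even a minimum-size TLAB, in which case a fresh backing page will be taken and the full small-object limit applies". A minimal standalone sketch, with hypothetical stand-ins for MinTLABSize and ZObjectSizeLimitSmall:

#include <algorithm>
#include <cstddef>

// Sketch only. 'min_tlab' and 'max_tlab' stand in for MinTLABSize and
// ZObjectSizeLimitSmall; 'remaining' is the space left in the shared page.
static size_t example_unsafe_max_tlab_alloc(size_t remaining,
                                            size_t min_tlab,
                                            size_t max_tlab) {
  if (remaining < min_tlab) {
    // Leftover too small for any TLAB: the next TLAB allocation takes a new
    // backing page, so the largest possible TLAB will fit there.
    remaining = max_tlab;
  }
  return std::min(remaining, max_tlab);
}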
 178 
 179 bool ZHeap::is_in(uintptr_t addr) const {
 180   // An address is considered to be "in the heap" if it points into
 181   // the allocated part of a page, regardless of which heap view is
 182   // used. Note that an address with the finalizable metadata bit set
 183   // is not pointing into a heap view, and therefore not considered
 184   // to be "in the heap".
 185 
 186   if (ZAddress::is_in(addr)) {
 187     const ZPage* const page = _page_table.get(addr);
 188     if (page != NULL) {
 189       return page->is_in(addr);
 190     }
 191   }
 192 
 193   return false;
 194 }
 195 
 196 uintptr_t ZHeap::block_start(uintptr_t addr) const {
 197   const ZPage* const page = _page_table.get(addr);
 198   return page->block_start(addr);
 199 }
 200 
 201 bool ZHeap::block_is_obj(uintptr_t addr) const {
 202   const ZPage* const page = _page_table.get(addr);
 203   return page->block_is_obj(addr);
 204 }
 205 
 206 uint ZHeap::nconcurrent_worker_threads() const {
 207   return _workers.nconcurrent();
 208 }
 209 
 210 uint ZHeap::nconcurrent_no_boost_worker_threads() const {


 300 
 301   // Flip address view
 302   flip_to_marked();
 303 
 304   // Retire allocating pages
 305   _object_allocator.retire_pages();
 306 
 307   // Reset allocated/reclaimed/used statistics
 308   _page_allocator.reset_statistics();
 309 
 310   // Reset encountered/dropped/enqueued statistics
 311   _reference_processor.reset_statistics();
 312 
 313   // Enter mark phase
 314   ZGlobalPhase = ZPhaseMark;
 315 
 316   // Reset marking information and mark roots
 317   _mark.start();
 318 
 319   // Update statistics
 320   ZStatHeap::set_at_mark_start(soft_max_capacity(), capacity(), used());
 321 }
 322 
 323 void ZHeap::mark(bool initial) {
 324   _mark.mark(initial);
 325 }
 326 
 327 void ZHeap::mark_flush_and_free(Thread* thread) {
 328   _mark.flush_and_free(thread);
 329 }
 330 


 331 bool ZHeap::mark_end() {
 332   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 333 
 334   // Try end marking
 335   if (!_mark.end()) {
 336     // Marking not completed, continue concurrent mark
 337     return false;
 338   }
 339 
 340   // Enter mark completed phase
 341   ZGlobalPhase = ZPhaseMarkCompleted;
 342 
 343   // Update statistics
 344   ZStatSample(ZSamplerHeapUsedAfterMark, used());
 345   ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
 346 
 347   // Block resurrection of weak/phantom references
 348   ZResurrection::block();
 349 
 350   // Process weak roots
 351   _weak_roots_processor.process_weak_roots();
 352 
 353   // Prepare to unload unused classes and code


 454   ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());
 455 
 456   // Remap/Relocate roots
 457   _relocate.start();
 458 }
 459 
 460 void ZHeap::relocate() {
 461   // Relocate relocation set
 462   const bool success = _relocate.relocate(&_relocation_set);
 463 
 464   // Update statistics
 465   ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
 466   ZStatRelocation::set_at_relocate_end(success);
 467   ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
 468                                  used(), used_high(), used_low());
 469 }
 470 
 471 void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
 472   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 473 
 474   ZHeapIterator iter;
 475   iter.objects_do(cl, visit_referents);
 476 }
 477 
 478 void ZHeap::serviceability_initialize() {
 479   _serviceability.initialize();
 480 }
 481 
 482 GCMemoryManager* ZHeap::serviceability_memory_manager() {
 483   return _serviceability.memory_manager();
 484 }
 485 
 486 MemoryPool* ZHeap::serviceability_memory_pool() {
 487   return _serviceability.memory_pool();
 488 }
 489 
 490 ZServiceabilityCounters* ZHeap::serviceability_counters() {
 491   return _serviceability.counters();
 492 }
 493 
 494 void ZHeap::print_on(outputStream* st) const {
 495   st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",




  62 ZHeap* ZHeap::_heap = NULL;
  63 
  64 ZHeap::ZHeap() :
  65     _workers(),
  66     _object_allocator(_workers.nworkers()),
  67     _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
  68     _page_table(),
  69     _forwarding_table(),
  70     _mark(&_workers, &_page_table),
  71     _reference_processor(&_workers),
  72     _weak_roots_processor(&_workers),
  73     _relocate(&_workers),
  74     _relocation_set(),
  75     _unload(&_workers),
  76     _serviceability(heap_min_size(), heap_max_size()) {
  77   // Install global heap instance
  78   assert(_heap == NULL, "Already initialized");
  79   _heap = this;
  80 
  81   // Update statistics
  82   ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
  83 }
  84 
  85 size_t ZHeap::heap_min_size() const {
  86   return MinHeapSize;
  87 }
  88 
  89 size_t ZHeap::heap_initial_size() const {
  90   return InitialHeapSize;
  91 }
  92 
  93 size_t ZHeap::heap_max_size() const {
  94   return MaxHeapSize;
  95 }
  96 
  97 size_t ZHeap::heap_max_reserve_size() const {
  98   // Reserve one small page per worker plus one shared medium page. This is still just
  99   // an estimate and doesn't guarantee that we can't run out of memory during relocation.
 100   const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
 101   return MIN2(max_reserve_size, heap_max_size());
 102 }
 103 
 104 bool ZHeap::is_initialized() const {
 105   return _page_allocator.is_initialized() && _mark.is_initialized();
 106 }
 107 
 108 size_t ZHeap::min_capacity() const {
 109   return _page_allocator.min_capacity();
 110 }
 111 
 112 size_t ZHeap::max_capacity() const {
 113   return _page_allocator.max_capacity();
 114 }
 115 
 116 size_t ZHeap::current_max_capacity() const {
 117   return _page_allocator.current_max_capacity();
 118 }
 119 
 120 size_t ZHeap::capacity() const {
 121   return _page_allocator.capacity();
 122 }
 123 
 124 size_t ZHeap::max_reserve() const {
 125   return _page_allocator.max_reserve();
 126 }
 127 
 128 size_t ZHeap::used_high() const {
 129   return _page_allocator.used_high();
 130 }
 131 
 132 size_t ZHeap::used_low() const {
 133   return _page_allocator.used_low();
 134 }
 135 
 136 size_t ZHeap::used() const {
 137   return _page_allocator.used();


 160 size_t ZHeap::max_tlab_size() const {
 161   return ZObjectSizeLimitSmall;
 162 }
 163 
 164 size_t ZHeap::unsafe_max_tlab_alloc() const {
 165   size_t size = _object_allocator.remaining();
 166 
 167   if (size < MinTLABSize) {
 168     // The remaining space in the allocator is not enough to
 169     // fit the smallest possible TLAB. This means that the next
 170     // TLAB allocation will force the allocator to get a new
 171     // backing page anyway, which in turn means that we can then
 172     // fit the largest possible TLAB.
 173     size = max_tlab_size();
 174   }
 175 
 176   return MIN2(size, max_tlab_size());
 177 }
 178 
 179 bool ZHeap::is_in(uintptr_t addr) const {
 180   if (addr < ZAddressReservedStart || addr >= ZAddressReservedEnd) {
 181     return false;
 182   }
 183 
 184   const ZPage* const page = _page_table.get(addr);
 185   if (page != NULL) {
 186     return page->is_in(addr);
 187   }
 188 
 189   return false;
 190 }
 191 
 192 uintptr_t ZHeap::block_start(uintptr_t addr) const {
 193   const ZPage* const page = _page_table.get(addr);
 194   return page->block_start(addr);
 195 }
 196 
 197 bool ZHeap::block_is_obj(uintptr_t addr) const {
 198   const ZPage* const page = _page_table.get(addr);
 199   return page->block_is_obj(addr);
 200 }
 201 
 202 uint ZHeap::nconcurrent_worker_threads() const {
 203   return _workers.nconcurrent();
 204 }
 205 
 206 uint ZHeap::nconcurrent_no_boost_worker_threads() const {


 296 
 297   // Flip address view
 298   flip_to_marked();
 299 
 300   // Retire allocating pages
 301   _object_allocator.retire_pages();
 302 
 303   // Reset allocated/reclaimed/used statistics
 304   _page_allocator.reset_statistics();
 305 
 306   // Reset encountered/dropped/enqueued statistics
 307   _reference_processor.reset_statistics();
 308 
 309   // Enter mark phase
 310   ZGlobalPhase = ZPhaseMark;
 311 
 312   // Reset marking information and mark roots
 313   _mark.start();
 314 
 315   // Update statistics
 316   ZStatHeap::set_at_mark_start(capacity(), used());
 317 }
 318 
 319 void ZHeap::mark(bool initial) {
 320   _mark.mark(initial);
 321 }
 322 
 323 void ZHeap::mark_flush_and_free(Thread* thread) {
 324   _mark.flush_and_free(thread);
 325 }
 326 
 327 class ZFixupPartialLoadsClosure : public ZRootsIteratorClosure {
 328 public:
 329   virtual void do_oop(oop* p) {
 330     ZBarrier::mark_barrier_on_root_oop_field(p);
 331   }
 332 
 333   virtual void do_oop(narrowOop* p) {
 334     ShouldNotReachHere();
 335   }
 336 };
 337 
 338 class ZFixupPartialLoadsTask : public ZTask {
 339 private:
 340   ZThreadRootsIterator _thread_roots;
 341 
 342 public:
 343   ZFixupPartialLoadsTask() :
 344       ZTask("ZFixupPartialLoadsTask"),
 345       _thread_roots() {}
 346 
 347   virtual void work() {
 348     ZFixupPartialLoadsClosure cl;
 349     _thread_roots.oops_do(&cl);
 350   }
 351 };
 352 
 353 void ZHeap::fixup_partial_loads() {
 354   ZFixupPartialLoadsTask task;
 355   _workers.run_parallel(&task);
 356 }
 357 
 358 bool ZHeap::mark_end() {
 359   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 360 
 361   // C2 can generate code where a safepoint poll is inserted
 362   // between a load and the associated load barrier. To handle
 363   // this case we need to rescan the thread stack here to make
 364   // sure such oops are marked.
 365   fixup_partial_loads();
 366 
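  // Illustrative aside, not part of this file: the hazard described above is,
  // conceptually (not actual C2 output), an interleaving of the form
  //
  //   tmp = load obj.field      // raw oop loaded, load barrier not yet applied
  //   safepoint_poll()          // marking may start while tmp sits only in a
  //                             // register or stack slot
  //   tmp = load_barrier(tmp)   // barrier runs after the poll
  //
  // Rescanning the thread roots here applies the mark barrier to any such oop
  // before marking is allowed to terminate.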
 367   // Try end marking
 368   if (!_mark.end()) {
 369     // Marking not completed, continue concurrent mark
 370     return false;
 371   }
 372 
 373   // Enter mark completed phase
 374   ZGlobalPhase = ZPhaseMarkCompleted;
 375 
 376   // Update statistics
 377   ZStatSample(ZSamplerHeapUsedAfterMark, used());
 378   ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
 379 
 380   // Block resurrection of weak/phantom references
 381   ZResurrection::block();
 382 
 383   // Process weak roots
 384   _weak_roots_processor.process_weak_roots();
 385 
 386   // Prepare to unload unused classes and code


 487   ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());
 488 
 489   // Remap/Relocate roots
 490   _relocate.start();
 491 }
 492 
 493 void ZHeap::relocate() {
 494   // Relocate relocation set
 495   const bool success = _relocate.relocate(&_relocation_set);
 496 
 497   // Update statistics
 498   ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
 499   ZStatRelocation::set_at_relocate_end(success);
 500   ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
 501                                  used(), used_high(), used_low());
 502 }
 503 
 504 void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
 505   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 506 
 507   ZHeapIterator iter(visit_referents);
 508   iter.objects_do(cl);
 509 }
 510 
 511 void ZHeap::serviceability_initialize() {
 512   _serviceability.initialize();
 513 }
 514 
 515 GCMemoryManager* ZHeap::serviceability_memory_manager() {
 516   return _serviceability.memory_manager();
 517 }
 518 
 519 MemoryPool* ZHeap::serviceability_memory_pool() {
 520   return _serviceability.memory_pool();
 521 }
 522 
 523 ZServiceabilityCounters* ZHeap::serviceability_counters() {
 524   return _serviceability.counters();
 525 }
 526 
 527 void ZHeap::print_on(outputStream* st) const {
 528   st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",

