
src/hotspot/share/gc/z/zHeap.cpp

Old version:

416   flip_to_remapped();
417 
418   // Enter relocate phase
419   ZGlobalPhase = ZPhaseRelocate;
420 
421   // Update statistics
422   ZStatHeap::set_at_relocate_start(_page_allocator.stats());
423 
424   // Notify JVMTI
425   JvmtiTagMap::set_needs_rehashing();
426 }
427 
428 void ZHeap::relocate() {
429   // Relocate relocation set
430   _relocate.relocate(&_relocation_set);
431 
432   // Update statistics
433   ZStatHeap::set_at_relocate_end(_page_allocator.stats(), _object_allocator.relocated());
434 }
435 
436 void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
437   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
438   ZHeapIterator iter(1 /* nworkers */, visit_weaks);
439   iter.object_iterate(cl, 0 /* worker_id */);
440 }
441 
442 ParallelObjectIterator* ZHeap::parallel_object_iterator(uint nworkers, bool visit_weaks) {
443   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
444   return new ZHeapIterator(nworkers, visit_weaks);
445 }
446 
447 void ZHeap::pages_do(ZPageClosure* cl) {
448   ZPageTableIterator iter(&_page_table);
449   for (ZPage* page; iter.next(&page);) {
450     cl->do_page(page);
451   }
452   _page_allocator.pages_do(cl);
453 }
454 
455 void ZHeap::serviceability_initialize() {

New version:

416   flip_to_remapped();
417 
418   // Enter relocate phase
419   ZGlobalPhase = ZPhaseRelocate;
420 
421   // Update statistics
422   ZStatHeap::set_at_relocate_start(_page_allocator.stats());
423 
424   // Notify JVMTI
425   JvmtiTagMap::set_needs_rehashing();
426 }
427 
428 void ZHeap::relocate() {
429   // Relocate relocation set
430   _relocate.relocate(&_relocation_set);
431 
432   // Update statistics
433   ZStatHeap::set_at_relocate_end(_page_allocator.stats(), _object_allocator.relocated());
434 }
435 
436 bool ZHeap::is_allocating(uintptr_t addr) const {
437   const ZPage* const page = _page_table.get(addr);
438   return page->is_allocating();
439 }
440 
441 void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
442   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
443   ZHeapIterator iter(1 /* nworkers */, visit_weaks);
444   iter.object_iterate(cl, 0 /* worker_id */);
445 }
446 
447 ParallelObjectIterator* ZHeap::parallel_object_iterator(uint nworkers, bool visit_weaks) {
448   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
449   return new ZHeapIterator(nworkers, visit_weaks);
450 }
451 
452 void ZHeap::pages_do(ZPageClosure* cl) {
453   ZPageTableIterator iter(&_page_table);
454   for (ZPage* page; iter.next(&page);) {
455     cl->do_page(page);
456   }
457   _page_allocator.pages_do(cl);
458 }
459 
460 void ZHeap::serviceability_initialize() {
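
The functional change in this hunk is the new ZHeap::is_allocating() helper (new lines 436-439): it looks up the ZPage covering addr in the page table and reports whether that page is still open for allocation. A minimal caller sketch, assuming HotSpot's ZHeap::heap() accessor and assert macro from the surrounding code; the function name verify_not_allocating is hypothetical and not part of this change:

  // Hypothetical sanity check built on the new helper; only
  // is_allocating(addr) is introduced by this diff, the rest is
  // assumed from the surrounding HotSpot/ZGC code.
  static void verify_not_allocating(uintptr_t addr) {
    assert(!ZHeap::heap()->is_allocating(addr),
           "address must not be in a page that is still allocating");
  }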