
src/hotspot/share/gc/z/zHeapIterator.cpp

  34 #include "utilities/stack.inline.hpp"
  35 
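      // Bitmap tracking which objects in a single heap granule (ZGranuleSize
      // bytes) have already been pushed onto the visit stack, with one bit per
      // possible small-object-aligned start offset.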
  36 class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
  37 private:
  38   CHeapBitMap _map;
  39 
  40 public:
  41   ZHeapIteratorBitMap(size_t size_in_bits) :
  42       _map(size_in_bits) {}
  43 
  44   bool try_set_bit(size_t index) {
  45     if (_map.at(index)) {
  46       return false;
  47     }
  48 
  49     _map.set_bit(index);
  50     return true;
  51   }
  52 };
  53 
  54 template <bool Concurrent, bool Weak>
  55 class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
  56 private:
  57   ZHeapIterator* const _iter;
  58 
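        // Load the root with semantics matching how it is scanned: weak roots
        // use a phantom, no-keepalive load, concurrent (strong) roots are
        // loaded through the load barrier without keepalive, and remaining
        // roots are loaded raw.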
  59   oop load_oop(oop* p) {
  60     if (Weak) {
  61       return NativeAccess<AS_NO_KEEPALIVE | ON_PHANTOM_OOP_REF>::oop_load(p);
  62     }
  63 
  64     if (Concurrent) {
  65       return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
  66     }
  67 
  68     return RawAccess<>::oop_load(p);
  69   }
  70 
  71 public:
  72   ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
  73       _iter(iter) {}
  74 
  75   virtual void do_oop(oop* p) {
  76     const oop obj = load_oop(p);


  77     _iter->push(obj);
  78   }
  79 
  80   virtual void do_oop(narrowOop* p) {
  81     ShouldNotReachHere();
  82   }
  83 };
  84 
  85 template <bool VisitReferents>
  86 class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
  87 private:
  88   ZHeapIterator* const _iter;
  89   const oop            _base;

  90 
  91   oop load_oop(oop* p) {
  92     if (VisitReferents) {
  93       return HeapAccess<AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));


  94     }
  95 
  96     return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
  97   }
  98 
  99 public:
 100   ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base) :
 101       _iter(iter),
 102       _base(base) {}

 103 
 104   virtual ReferenceIterationMode reference_iteration_mode() {
 105     return VisitReferents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
 106   }
 107 
 108   virtual void do_oop(oop* p) {
 109     const oop obj = load_oop(p);
 110     _iter->push(obj);
 111   }
 112 
 113   virtual void do_oop(narrowOop* p) {
 114     ShouldNotReachHere();
 115   }
 116 
 117 #ifdef ASSERT
 118   virtual bool should_verify_oops() {
 119     return false;
 120   }
 121 #endif
 122 };
 123 
 124 ZHeapIterator::ZHeapIterator() :
 125     _visit_stack(),
 126     _visit_map() {}

 127 
 128 ZHeapIterator::~ZHeapIterator() {
 129   ZVisitMapIterator iter(&_visit_map);
 130   for (ZHeapIteratorBitMap* map; iter.next(&map);) {
 131     delete map;
 132   }
 133 }
 134 
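      // Number of possible object start positions within one granule, given
      // the small object alignment.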
 135 static size_t object_index_max() {
 136   return ZGranuleSize >> ZObjectAlignmentSmallShift;
 137 }
 138 
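      // Index of the object within its granule, in units of the small object
      // alignment.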
 139 static size_t object_index(oop obj) {
 140   const uintptr_t addr = ZOop::to_address(obj);
 141   const uintptr_t offset = ZAddress::offset(addr);
 142   const uintptr_t mask = ZGranuleSize - 1;
 143   return (offset & mask) >> ZObjectAlignmentSmallShift;
 144 }
 145 
 146 ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {


 154   return map;
 155 }
 156 
 157 void ZHeapIterator::push(oop obj) {
 158   if (obj == NULL) {
 159     // Ignore
 160     return;
 161   }
 162 
 163   ZHeapIteratorBitMap* const map = object_map(obj);
 164   const size_t index = object_index(obj);
 165   if (!map->try_set_bit(index)) {
 166     // Already pushed
 167     return;
 168   }
 169 
 170   // Push
 171   _visit_stack.push(obj);
 172 }
 173 
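      // Push all roots provided by the given roots iterator, loading each root
      // with semantics matching whether it is scanned concurrently and/or weakly.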
 174 template <typename RootsIterator, bool Concurrent, bool Weak>
 175 void ZHeapIterator::push_roots() {
 176   ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(this);
 177   RootsIterator roots;
 178   roots.oops_do(&cl);
 179 }
 180 
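      // Push the object's reference fields; the referent field of reference
      // objects is only pushed when VisitReferents is set.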
 181 template <bool VisitReferents>
 182 void ZHeapIterator::push_fields(oop obj) {
 183   ZHeapIteratorOopClosure<VisitReferents> cl(this, obj);
 184   obj->oop_iterate(&cl);
 185 }
 186 
 187 template <bool VisitReferents>
 188 void ZHeapIterator::objects_do(ObjectClosure* cl) {
 189   ZStatTimerDisable disable;
 190 
 191   // Push roots to visit
 192   push_roots<ZRootsIterator,               false /* Concurrent */, false /* Weak */>();
 193   push_roots<ZConcurrentRootsIterator,     true  /* Concurrent */, false /* Weak */>();
 194   push_roots<ZWeakRootsIterator,           false /* Concurrent */, true  /* Weak */>();
 195   push_roots<ZConcurrentWeakRootsIterator, true  /* Concurrent */, true  /* Weak */>();
 196 
 197   // Drain stack
 198   while (!_visit_stack.is_empty()) {
 199     const oop obj = _visit_stack.pop();
 200 
 201     // Visit object
 202     cl->do_object(obj);
 203 
 204     // Push fields to visit
 205     push_fields<VisitReferents>(obj);
 206   }
 207 }
 208 
 209 void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_referents) {
 210   if (visit_referents) {
 211     objects_do<true /* VisitReferents */>(cl);
 212   } else {
 213     objects_do<false /* VisitReferents */>(cl);
 214   }
 215 }


  34 #include "utilities/stack.inline.hpp"
  35 
  36 class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
  37 private:
  38   CHeapBitMap _map;
  39 
  40 public:
  41   ZHeapIteratorBitMap(size_t size_in_bits) :
  42       _map(size_in_bits) {}
  43 
  44   bool try_set_bit(size_t index) {
  45     if (_map.at(index)) {
  46       return false;
  47     }
  48 
  49     _map.set_bit(index);
  50     return true;
  51   }
  52 };
  53 

  54 class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
  55 private:
  56   ZHeapIterator* const _iter;
  57 
  58 public:
  59   ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
  60       _iter(iter) {}
  61 
  62   virtual void do_oop(oop* p) {
  63     // Load barrier needed here, even on non-concurrent strong roots,
  64     // for the same reason we need fixup_partial_loads() in ZHeap::mark_end().
  65     const oop obj = NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
  66     _iter->push(obj);
  67   }
  68 
  69   virtual void do_oop(narrowOop* p) {
  70     ShouldNotReachHere();
  71   }
  72 };
  73 

  74 class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
  75 private:
  76   ZHeapIterator* const _iter;
  77   const oop            _base;
  78   const bool           _visit_referents;
  79 
  80   oop load_oop(oop* p) const {
  81     if (_visit_referents) {
  82       return HeapAccess<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>::oop_load_at(_base, _base->field_offset(p));
  83     } else {
  84       return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
  85     }


  86   }
  87 
  88 public:
  89   ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base, bool visit_referents) :
  90       _iter(iter),
  91       _base(base),
  92       _visit_referents(visit_referents) {}
  93 
  94   virtual ReferenceIterationMode reference_iteration_mode() {
  95     return _visit_referents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
  96   }
  97 
  98   virtual void do_oop(oop* p) {
  99     const oop obj = load_oop(p);
 100     _iter->push(obj);
 101   }
 102 
 103   virtual void do_oop(narrowOop* p) {
 104     ShouldNotReachHere();
 105   }
 106 
 107 #ifdef ASSERT
 108   virtual bool should_verify_oops() {
 109     return false;
 110   }
 111 #endif
 112 };
 113 
 114 ZHeapIterator::ZHeapIterator(bool visit_referents) :
 115     _visit_stack(),
 116     _visit_map(),
 117     _visit_referents(visit_referents) {}
 118 
 119 ZHeapIterator::~ZHeapIterator() {
 120   ZVisitMapIterator iter(&_visit_map);
 121   for (ZHeapIteratorBitMap* map; iter.next(&map);) {
 122     delete map;
 123   }
 124 }
 125 
 126 static size_t object_index_max() {
 127   return ZGranuleSize >> ZObjectAlignmentSmallShift;
 128 }
 129 
 130 static size_t object_index(oop obj) {
 131   const uintptr_t addr = ZOop::to_address(obj);
 132   const uintptr_t offset = ZAddress::offset(addr);
 133   const uintptr_t mask = ZGranuleSize - 1;
 134   return (offset & mask) >> ZObjectAlignmentSmallShift;
 135 }
 136 
 137 ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {


 145   return map;
 146 }
 147 
 148 void ZHeapIterator::push(oop obj) {
 149   if (obj == NULL) {
 150     // Ignore
 151     return;
 152   }
 153 
 154   ZHeapIteratorBitMap* const map = object_map(obj);
 155   const size_t index = object_index(obj);
 156   if (!map->try_set_bit(index)) {
 157     // Already pushed
 158     return;
 159   }
 160 
 161   // Push
 162   _visit_stack.push(obj);
 163 }
 164 
 165 void ZHeapIterator::objects_do(ObjectClosure* cl) {
 166   // Note that the heap iterator visits all reachable objects, including
 167   // objects that might be unreachable from the application, such as a
  168   // not yet cleared JNIWeakGlobalRef. However, also note that visiting
 169   // the JVMTI tag map is a requirement to make sure we visit all tagged
 170   // objects, even those that might now have become phantom reachable.
  171   // If we didn't do this, the application would expect to see ObjectFree
  172   // events for phantom reachable objects in the tag map.
 173 
 174   ZStatTimerDisable disable;
 175   ZHeapIteratorRootOopClosure root_cl(this);
 176 
 177   // Push strong roots onto stack
 178   {
 179     ZRootsIterator roots;
 180     roots.oops_do(&root_cl);
 181   }
 182 
 183   {
 184     ZConcurrentRootsIterator roots;
 185     roots.oops_do(&root_cl);
 186   }
 187 
 188   // Push weak roots onto stack
 189   {
 190     ZWeakRootsIterator roots;
 191     roots.oops_do(&root_cl);
 192   }
 193 
 194   {
 195     ZConcurrentWeakRootsIterator roots;
 196     roots.oops_do(&root_cl);
 197   }
 198 
 199   // Drain stack
 200   while (!_visit_stack.is_empty()) {
 201     const oop obj = _visit_stack.pop();
 202 
 203     // Visit
 204     cl->do_object(obj);
 205 
 206     // Push members to visit
 207     ZHeapIteratorOopClosure push_cl(this, obj, _visit_referents);
 208     obj->oop_iterate(&push_cl);
 209   }
 210 }