1 /*
   2  * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/z/zBarrier.inline.hpp"
  26 #include "gc/z/zGlobals.hpp"
  27 #include "gc/z/zGranuleMap.inline.hpp"
  28 #include "gc/z/zHeapIterator.hpp"
  29 #include "gc/z/zOop.inline.hpp"
  30 #include "gc/z/zRootsIterator.hpp"
  31 #include "gc/z/zStat.hpp"
  32 #include "memory/iterator.inline.hpp"
  33 #include "utilities/bitMap.inline.hpp"
  34 #include "utilities/stack.inline.hpp"
  35 
  36 class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
  37 private:
  38   CHeapBitMap _map;
  39 
  40 public:
  41   ZHeapIteratorBitMap(size_t size_in_bits) :
  42       _map(size_in_bits) {}
  43 
  44   bool try_set_bit(size_t index) {
  45     if (_map.at(index)) {
  46       return false;
  47     }
  48 
  49     _map.set_bit(index);
  50     return true;
  51   }
  52 };
  53 
  54 template <bool Concurrent, bool Weak>
  55 class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
  56 private:
  57   ZHeapIterator* const _iter;
  58 
  59   oop load_oop(oop* p) {
  60     if (Weak) {
  61       return NativeAccess<AS_NO_KEEPALIVE | ON_PHANTOM_OOP_REF>::oop_load(p);
  62     }
  63 
  64     if (Concurrent) {
  65       return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
  66     }
  67 
  68     return RawAccess<>::oop_load(p);
  69   }
  70 
  71 public:
  72   ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
  73       _iter(iter) {}
  74 
  75   virtual void do_oop(oop* p) {
  76     const oop obj = load_oop(p);
  77     _iter->push(obj);
  78   }
  79 
  80   virtual void do_oop(narrowOop* p) {
  81     ShouldNotReachHere();
  82   }
  83 };
  84 
  85 template <bool VisitReferents>
  86 class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
  87 private:
  88   ZHeapIterator* const _iter;
  89   const oop            _base;
  90 
  91   oop load_oop(oop* p) {
  92     if (VisitReferents) {
  93       return HeapAccess<AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
  94     }
  95 
  96     return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
  97   }
  98 
  99 public:
 100   ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base) :
 101       _iter(iter),
 102       _base(base) {}
 103 
 104   virtual ReferenceIterationMode reference_iteration_mode() {
 105     return VisitReferents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
 106   }
 107 
 108   virtual void do_oop(oop* p) {
 109     const oop obj = load_oop(p);
 110     _iter->push(obj);
 111   }
 112 
 113   virtual void do_oop(narrowOop* p) {
 114     ShouldNotReachHere();
 115   }
 116 
 117 #ifdef ASSERT
 118   virtual bool should_verify_oops() {
 119     return false;
 120   }
 121 #endif
 122 };
 123 
// Starts with an empty visit stack and an empty per-granule visit map;
// bitmaps are allocated lazily in object_map() as objects are encountered.
ZHeapIterator::ZHeapIterator() :
    _visit_stack(),
    _visit_map() {}
 127 
 128 ZHeapIterator::~ZHeapIterator() {
 129   ZVisitMapIterator iter(&_visit_map);
 130   for (ZHeapIteratorBitMap* map; iter.next(&map);) {
 131     delete map;
 132   }
 133 }
 134 
 135 static size_t object_index_max() {
 136   return ZGranuleSize >> ZObjectAlignmentSmallShift;
 137 }
 138 
 139 static size_t object_index(oop obj) {
 140   const uintptr_t addr = ZOop::to_address(obj);
 141   const uintptr_t offset = ZAddress::offset(addr);
 142   const uintptr_t mask = ZGranuleSize - 1;
 143   return (offset & mask) >> ZObjectAlignmentSmallShift;
 144 }
 145 
 146 ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {
 147   const uintptr_t addr = ZOop::to_address(obj);
 148   ZHeapIteratorBitMap* map = _visit_map.get(addr);
 149   if (map == NULL) {
 150     map = new ZHeapIteratorBitMap(object_index_max());
 151     _visit_map.put(addr, map);
 152   }
 153 
 154   return map;
 155 }
 156 
 157 void ZHeapIterator::push(oop obj) {
 158   if (obj == NULL) {
 159     // Ignore
 160     return;
 161   }
 162 
 163   ZHeapIteratorBitMap* const map = object_map(obj);
 164   const size_t index = object_index(obj);
 165   if (!map->try_set_bit(index)) {
 166     // Already pushed
 167     return;
 168   }
 169 
 170   // Push
 171   _visit_stack.push(obj);
 172 }
 173 
 174 template <typename RootsIterator, bool Concurrent, bool Weak>
 175 void ZHeapIterator::push_roots() {
 176   ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(this);
 177   RootsIterator roots;
 178   roots.oops_do(&cl);
 179 }
 180 
 181 template <bool VisitReferents>
 182 void ZHeapIterator::push_fields(oop obj) {
 183   ZHeapIteratorOopClosure<VisitReferents> cl(this, obj);
 184   obj->oop_iterate(&cl);
 185 }
 186 
 187 template <bool VisitReferents>
 188 void ZHeapIterator::objects_do(ObjectClosure* cl) {
 189   ZStatTimerDisable disable;
 190 
 191   // Push roots to visit
 192   push_roots<ZRootsIterator,               false /* Concurrent */, false /* Weak */>();
 193   push_roots<ZConcurrentRootsIterator,     true  /* Concurrent */, false /* Weak */>();
 194   push_roots<ZWeakRootsIterator,           false /* Concurrent */, true  /* Weak */>();
 195   push_roots<ZConcurrentWeakRootsIterator, true  /* Concurrent */, true  /* Weak */>();
 196 
 197   // Drain stack
 198   while (!_visit_stack.is_empty()) {
 199     const oop obj = _visit_stack.pop();
 200 
 201     // Visit object
 202     cl->do_object(obj);
 203 
 204     // Push fields to visit
 205     push_fields<VisitReferents>(obj);
 206   }
 207 }
 208 
 209 void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_referents) {
 210   if (visit_referents) {
 211     objects_do<true /* VisitReferents */>(cl);
 212   } else {
 213     objects_do<false /* VisitReferents */>(cl);
 214   }
 215 }