/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_MEMORY_ITERATOR_HPP
#define SHARE_MEMORY_ITERATOR_HPP

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"

class CodeBlob;
class nmethod;
class ReferenceDiscoverer;
class DataLayout;
class KlassClosure;
class ClassLoaderData;
class Symbol;
class Metadata;

// The following classes are C++ `closures` for iterating over objects, roots and spaces

class Closure : public StackObj { };

// OopClosure is used for iterating through references to Java objects.
class OopClosure : public Closure {
 public:
  virtual void do_oop(oop* o) = 0;
  virtual void do_oop(narrowOop* o) = 0;
  virtual void do_oop_no_buffering(oop* o) { do_oop(o); }
  virtual void do_oop_no_buffering(narrowOop* o) { do_oop(o); }
};
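
// Illustrative sketch (not part of this interface): a concrete OopClosure that
// simply counts the reference locations it is applied to. The class name and
// field below are hypothetical; real closures typically mark, copy or adjust
// the referenced objects instead.
//
//   class CountingOopClosure : public OopClosure {
//     size_t _count;
//    public:
//     CountingOopClosure() : _count(0) {}
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//     size_t count() const { return _count; }
//   };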

class DoNothingClosure : public OopClosure {
 public:
  virtual void do_oop(oop* p)       {}
  virtual void do_oop(narrowOop* p) {}
};
extern DoNothingClosure do_nothing_cl;

// OopIterateClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class OopIterateClosure : public OopClosure {
 private:
  ReferenceDiscoverer* _ref_discoverer;

 protected:
  OopIterateClosure(ReferenceDiscoverer* rd) : _ref_discoverer(rd) { }
  OopIterateClosure() : _ref_discoverer(NULL) { }
  ~OopIterateClosure() { }

  void set_ref_discoverer_internal(ReferenceDiscoverer* rd) { _ref_discoverer = rd; }

 public:
  ReferenceDiscoverer* ref_discoverer() const { return _ref_discoverer; }

  // Iteration of InstanceRefKlasses differs depending on the closure;
  // the enum below describes the alternatives.
  enum ReferenceIterationMode {
    DO_DISCOVERY,                // Apply closure and discover references
    DO_DISCOVERED_AND_DISCOVERY, // Apply closure to discovered field and do discovery
    DO_FIELDS,                   // Apply closure to all fields
    DO_FIELDS_EXCEPT_REFERENT    // Apply closure to all fields except the referent field
  };

  // The default iteration mode is to do discovery.
  virtual ReferenceIterationMode reference_iteration_mode() { return DO_DISCOVERY; }

  // If the do_metadata function returns "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_cld   on the class loader data in class loaders.

  virtual bool do_metadata() = 0;
  virtual void do_klass(Klass* k) = 0;
  virtual void do_cld(ClassLoaderData* cld) = 0;

#ifdef ASSERT
  // Default verification of each visited oop field.
  template <typename T> void verify(T* p);

  // Can be used by subclasses to turn off the default verification of oop fields.
  virtual bool should_verify_oops() { return true; }
#endif
};

// An OopIterateClosure that can be used when there's no need to visit the Metadata.
class BasicOopIterateClosure : public OopIterateClosure {
public:
  BasicOopIterateClosure(ReferenceDiscoverer* rd = NULL) : OopIterateClosure(rd) {}

  virtual bool do_metadata() { return false; }
  virtual void do_klass(Klass* k) { ShouldNotReachHere(); }
  virtual void do_cld(ClassLoaderData* cld) { ShouldNotReachHere(); }
};
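
// Illustrative sketch (not part of this interface): a closure derived from
// BasicOopIterateClosure only needs the two do_oop overloads, since metadata
// visiting is disabled. The class name is hypothetical, and applying it via
// oopDesc::oop_iterate assumes the usual template entry point in oop.hpp.
//
//   class PrintFieldsClosure : public BasicOopIterateClosure {
//    public:
//     virtual void do_oop(oop* p)       { tty->print_cr("field at " PTR_FORMAT, p2i(p)); }
//     virtual void do_oop(narrowOop* p) { tty->print_cr("field at " PTR_FORMAT, p2i(p)); }
//   };
//
//   PrintFieldsClosure cl;
//   obj->oop_iterate(&cl);   // visit the reference fields of a single object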

class BufferedValueClosure : public Closure {
public:
  virtual void do_buffered_value(oop* p) = 0;
};

class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};

class CLDClosure : public Closure {
 public:
  virtual void do_cld(ClassLoaderData* cld) = 0;
};

class MetadataClosure : public Closure {
 public:
  virtual void do_metadata(Metadata* md) = 0;
};


class CLDToOopClosure : public CLDClosure {
  OopClosure*       _oop_closure;
  int               _cld_claim;

 public:
  CLDToOopClosure(OopClosure* oop_closure,
                  int cld_claim) :
      _oop_closure(oop_closure),
      _cld_claim(cld_claim) {}

  void do_cld(ClassLoaderData* cld);
};
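
// Illustrative sketch (not part of this interface): adapting an OopClosure so
// it can be handed to a ClassLoaderData walk. The claim constant and the
// ClassLoaderDataGraph::cld_do entry point are assumed to be the ones declared
// in the class loader data headers; CountingOopClosure is the hypothetical
// closure sketched above.
//
//   CountingOopClosure oops;
//   CLDToOopClosure cld_cl(&oops, ClassLoaderData::_claim_none);
//   ClassLoaderDataGraph::cld_do(&cld_cl);   // applies do_cld to every CLD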

// The base class for all concurrent marking closures
// that participate in class unloading.
// It is used to proxy through the visited metadata to the oops defined in them.
class MetadataVisitingOopIterateClosure: public OopIterateClosure {
 public:
  MetadataVisitingOopIterateClosure(ReferenceDiscoverer* rd = NULL) : OopIterateClosure(rd) { }

  virtual bool do_metadata() { return true; }
  virtual void do_klass(Klass* k);
  virtual void do_cld(ClassLoaderData* cld);
};
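
// Illustrative sketch (not part of this interface): a marking-style closure
// built on MetadataVisitingOopIterateClosure only provides the do_oop
// overloads; do_klass and do_cld above already proxy to the oops held by the
// visited metadata. The class name is hypothetical.
//
//   class SketchMarkingClosure : public MetadataVisitingOopIterateClosure {
//    public:
//     SketchMarkingClosure(ReferenceDiscoverer* rd) : MetadataVisitingOopIterateClosure(rd) {}
//     virtual void do_oop(oop* p)       { /* mark *p if not yet marked */ }
//     virtual void do_oop(narrowOop* p) { /* decode the narrowOop, then mark */ }
//   };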

// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};
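
// Illustrative sketch (not part of this interface): an ObjectClosure that
// counts every object handed to it by an object space or heap walk. The class
// name is hypothetical.
//
//   class ObjectCountClosure : public ObjectClosure {
//     size_t _count;
//    public:
//     ObjectCountClosure() : _count(0) {}
//     virtual void do_object(oop obj) { _count++; }
//     size_t count() const { return _count; }
//   };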


class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};

class AlwaysTrueClosure: public BoolObjectClosure {
 public:
  bool do_object_b(oop p) { return true; }
};

class AlwaysFalseClosure : public BoolObjectClosure {
 public:
  bool do_object_b(oop p) { return false; }
};

// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration.
class ObjectToOopClosure: public ObjectClosure {
  OopIterateClosure* _cl;
public:
  void do_object(oop obj);
  ObjectToOopClosure(OopIterateClosure* cl) : _cl(cl) {}
};
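
// Illustrative sketch (not part of this interface): visiting every reference
// field of every object by composing the two closure kinds. PrintFieldsClosure
// is the hypothetical OopIterateClosure sketched earlier, and the call assumes
// the CollectedHeap::object_iterate entry point.
//
//   PrintFieldsClosure fields;
//   ObjectToOopClosure obj_to_fields(&fields);
//   Universe::heap()->object_iterate(&obj_to_fields);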

// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};

// The following are used in CompactibleFreeListSpace and
// ConcurrentMarkSweepGeneration.

// Blk closure (abstract class)
class BlkClosure : public StackObj {
 public:
  virtual size_t do_blk(HeapWord* addr) = 0;
};

// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};

// SpaceClosure is used for iterating over spaces

class Space;
class CompactibleSpace;

class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};

class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};


// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks

class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};

// Applies an oop closure to all ref fields in code blobs
// visited during a code blob iteration.
class CodeBlobToOopClosure : public CodeBlobClosure {
  OopClosure* _cl;
  bool _fix_relocations;
 protected:
  void do_nmethod(nmethod* nm);
 public:
  // If fix_relocations(), then cl must copy objects to their new location immediately to avoid
  // patching nmethods with the old locations.
  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
  virtual void do_code_blob(CodeBlob* cb);

  bool fix_relocations() const { return _fix_relocations; }
  const static bool FixRelocations = true;
};
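
// Illustrative sketch (not part of this interface): scanning the oops embedded
// in the nmethods of the code cache, assuming the CodeCache::blobs_do entry
// point (and whatever locking it requires). CountingOopClosure is the
// hypothetical closure sketched earlier; FixRelocations is the constant above.
//
//   CountingOopClosure oops;
//   CodeBlobToOopClosure code_cl(&oops, !CodeBlobToOopClosure::FixRelocations);
//   CodeCache::blobs_do(&code_cl);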

class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
 public:
  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}

  // Called for each code blob, but at most once per unique blob.
  virtual void do_code_blob(CodeBlob* cb);
};

class NMethodClosure : public Closure {
 public:
  virtual void do_nmethod(nmethod* n) = 0;
};

// MonitorClosure is used for iterating over monitors in the monitors cache

class ObjectMonitor;

class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};

// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // Ideally this would be pure virtual, but declaring it so
  // breaks things in ways that are not understood.
  virtual void do_void();
};


// YieldClosure is intended for use by iteration loops
// to incrementalize their work, interleaving an interruptible
// task so that other threads can run (threads that might
// otherwise be unable to access exclusive resources, for
// instance). The closure also allows an ongoing iteration
// to be aborted, based on the return value of the polling
// call.
class YieldClosure : public StackObj {
public:
 virtual bool should_return() = 0;

 // Yield at a fine-grained level. When not yielding, the check should be very fast.
 virtual bool should_return_fine_grain() { return false; }
};
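
// Illustrative sketch (not part of this interface): how an iteration loop might
// cooperate with a YieldClosure. The work-list helpers are hypothetical.
//
//   void process_all(YieldClosure* yield) {
//     while (has_more_work()) {           // hypothetical work list
//       if (yield->should_return()) {
//         return;                         // yield/abort; caller may resume later
//       }
//       do_one_unit_of_work();            // hypothetical work unit
//     }
//   }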

// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the 32-bit unsigned integer pointed to by p.
  virtual void do_u4(u4* p) = 0;

  // Read/write the bool pointed to by p.
  virtual void do_bool(bool* p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag.  If reading, then compare the tag against
  // the passed-in value and fail if they don't match.  This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;

  // Read/write the oop.
  virtual void do_oop(oop* o) = 0;

  bool writing() {
    return !reading();
  }
};
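
// Illustrative sketch (not part of this interface): one routine used for both
// dumping and restoring, issuing the same SerializeClosure calls in the same
// order so the stream layouts match. The data items and tag value are
// hypothetical.
//
//   static u4    _example_version;
//   static void* _example_table;
//
//   void serialize_example(SerializeClosure* soc) {
//     soc->do_tag(0x1234);                // guards against layout drift
//     soc->do_u4(&_example_version);
//     soc->do_ptr(&_example_table);
//     if (soc->reading()) {
//       // post-process the freshly read values here
//     }
//   }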

class SymbolClosure : public StackObj {
 public:
  virtual void do_symbol(Symbol**) = 0;

  // Clear LSB in symbol address; it can be set by CPSlot.
  static Symbol* load_symbol(Symbol** p) {
    return (Symbol*)(intptr_t(*p) & ~1);
  }

  // Store symbol, adjusting new pointer if the original pointer was adjusted
  // (symbol references in constant pool slots have their LSB set to 1).
  static void store_symbol(Symbol** p, Symbol* sym) {
    *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
  }
};
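
// Illustrative sketch (not part of this interface): a SymbolClosure that uses
// load_symbol above to strip the possible CPSlot tag bit before touching the
// Symbol. The class name is hypothetical.
//
//   class PrintSymbolClosure : public SymbolClosure {
//    public:
//     virtual void do_symbol(Symbol** p) {
//       Symbol* sym = load_symbol(p);     // mask off the LSB tag, if set
//       if (sym != NULL) {
//         sym->print_symbol_on(tty);
//         tty->cr();
//       }
//     }
//   };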

// Dispatches to the non-virtual functions if OopClosureType has
// a concrete implementation; otherwise a virtual call is taken.
class Devirtualizer {
 public:
  template <typename OopClosureType, typename T> static void do_oop_no_verify(OopClosureType* closure, T* p);
  template <typename OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
  template <typename OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
  template <typename OopClosureType>             static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
  template <typename OopClosureType>             static bool do_metadata(OopClosureType* closure);
};

class OopIteratorClosureDispatch {
 public:
  template <typename OopClosureType> static void oop_oop_iterate(OopClosureType* cl, oop obj, Klass* klass);
  template <typename OopClosureType> static void oop_oop_iterate(OopClosureType* cl, oop obj, Klass* klass, MemRegion mr);
  template <typename OopClosureType> static void oop_oop_iterate_backwards(OopClosureType* cl, oop obj, Klass* klass);
};
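
// Illustrative sketch (not part of this interface): the dispatch entry points
// above are normally reached from oopDesc::oop_iterate and friends, but can be
// called directly when the closure's static type is known. PrintFieldsClosure
// is the hypothetical closure sketched earlier; obj is any oop in scope.
//
//   PrintFieldsClosure cl;
//   OopIteratorClosureDispatch::oop_oop_iterate(&cl, obj, obj->klass());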

#endif // SHARE_MEMORY_ITERATOR_HPP