< prev index next >

src/hotspot/share/gc/shared/generation.cpp

Print this page




  51                     "object heap");
  52   }
  53   // Mangle all of the initial generation.
  54   if (ZapUnusedHeapArea) {
  55     MemRegion mangle_region((HeapWord*)_virtual_space.low(),
  56       (HeapWord*)_virtual_space.high());
  57     SpaceMangler::mangle_region(mangle_region);
  58   }
  59   _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
  60           (HeapWord*)_virtual_space.high_boundary());
  61 }
  62 
  63 size_t Generation::initial_size() {
  64   GenCollectedHeap* gch = GenCollectedHeap::heap();
  65   if (gch->is_young_gen(this)) {
  66     return gch->young_gen_spec()->init_size();
  67   }
  68   return gch->old_gen_spec()->init_size();
  69 }
  70 
// This is for CMS. It returns a stable monotonic used space size.
// Remove this when CMS is removed.
// The default implementation simply delegates to used().
size_t Generation::used_stable() const {
  return used();
}
  76 
// Upper bound on this generation's capacity: the byte size of its
// entire reserved region.
size_t Generation::max_capacity() const {
  return reserved().byte_size();
}
  80 
  81 // By default we get a single threaded default reference processor;
  82 // generations needing multi-threaded refs processing or discovery override this method.
  83 void Generation::ref_processor_init() {
  84   assert(_ref_processor == NULL, "a reference processor already exists");
  85   assert(!_reserved.is_empty(), "empty generation?");
  86   _span_based_discoverer.set_span(_reserved);
  87   _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
  88   if (_ref_processor == NULL) {
  89     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  90   }
  91 }
  92 
  93 void Generation::print() const { print_on(tty); }
  94 
  95 void Generation::print_on(outputStream* st)  const {
  96   st->print(" %-20s", name());


 167 oop Generation::promote(oop obj, size_t obj_size) {
 168   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
 169 
 170 #ifndef PRODUCT
 171   if (GenCollectedHeap::heap()->promotion_should_fail()) {
 172     return NULL;
 173   }
 174 #endif  // #ifndef PRODUCT
 175 
 176   HeapWord* result = allocate(obj_size, false);
 177   if (result != NULL) {
 178     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
 179     return oop(result);
 180   } else {
 181     GenCollectedHeap* gch = GenCollectedHeap::heap();
 182     return gch->handle_failed_promotion(this, obj, obj_size);
 183   }
 184 }
 185 
// Per-thread parallel promotion. Not supported by the base class;
// generations that promote in parallel are expected to override this.
oop Generation::par_promote(int thread_num,
                            oop obj, markWord m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}
 192 
 193 Space* Generation::space_containing(const void* p) const {
 194   GenerationIsInReservedClosure blk(p);
 195   // Cast away const
 196   ((Generation*)this)->space_iterate(&blk);
 197   return blk.sp;
 198 }
 199 
 200 // Some of these are mediocre general implementations.  Should be
 201 // overridden to get better performance.
 202 
 203 class GenerationBlockStartClosure : public SpaceClosure {
 204  public:
 205   const void* _p;
 206   HeapWord* _start;
 207   virtual void do_space(Space* s) {




  51                     "object heap");
  52   }
  53   // Mangle all of the initial generation.
  54   if (ZapUnusedHeapArea) {
  55     MemRegion mangle_region((HeapWord*)_virtual_space.low(),
  56       (HeapWord*)_virtual_space.high());
  57     SpaceMangler::mangle_region(mangle_region);
  58   }
  59   _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
  60           (HeapWord*)_virtual_space.high_boundary());
  61 }
  62 
// Initial (start-up) size of this generation, taken from the matching
// (young or old) generation spec.
size_t Generation::initial_size() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (gch->is_young_gen(this)) {
    return gch->young_gen_spec()->init_size();
  }
  return gch->old_gen_spec()->init_size();
}
  70 






// Upper bound on this generation's capacity: the byte size of its
// entire reserved region.
size_t Generation::max_capacity() const {
  return reserved().byte_size();
}
  74 
// By default we get a single threaded default reference processor;
// generations needing multi-threaded refs processing or discovery override this method.
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  // Reference discovery spans the generation's entire reserved region.
  _span_based_discoverer.set_span(_reserved);
  _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
  if (_ref_processor == NULL) {
    // NOTE(review): reachable only if operator new can return NULL here;
    // depends on the allocation-failure strategy — confirm before removing.
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}
  86 
// Print a summary of this generation on the default tty stream.
void Generation::print() const { print_on(tty); }
  88 
  89 void Generation::print_on(outputStream* st)  const {
  90   st->print(" %-20s", name());


// Copy obj (obj_size words) into this generation and return the copy.
// On allocation failure, delegate to the heap's failed-promotion handler.
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  // Test hook: optionally force promotion to fail.
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    // Source and destination are word-aligned and cannot overlap.
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    // No space in this generation; let the heap decide what to do.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}
 179 
// Per-thread parallel promotion. Not supported by the base class;
// generations that promote in parallel are expected to override this.
oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}
 186 
// Return the Space in this generation that contains p, found by iterating
// over the generation's spaces with GenerationIsInReservedClosure.
Space* Generation::space_containing(const void* p) const {
  GenerationIsInReservedClosure blk(p);
  // Cast away const — presumably space_iterate() is non-const; verify.
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp;
}
 193 
 194 // Some of these are mediocre general implementations.  Should be
 195 // overridden to get better performance.
 196 
 197 class GenerationBlockStartClosure : public SpaceClosure {
 198  public:
 199   const void* _p;
 200   HeapWord* _start;
 201   virtual void do_space(Space* s) {


< prev index next >