src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

@@ -690,14 +690,10 @@
 size_t
 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 }
 
-size_t ConcurrentMarkSweepGeneration::used_stable() const {
-  return cmsSpace()->used_stable();
-}
-
 size_t ConcurrentMarkSweepGeneration::max_available() const {
   return free() + _virtual_space.uncommitted_size();
 }
 
 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {

@@ -1012,11 +1008,11 @@
 // size of an object that may not yet have been initialized.
 
 // Things to support parallel young-gen collection.
 oop
 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
-                                           oop old, markWord m,
+                                           oop old, markOop m,
                                            size_t word_sz) {
 #ifndef PRODUCT
   if (CMSHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
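
A note on the type swap in this hunk and the similar ones below: in HotSpot releases before JDK 14 the object header is the pointer-typed markOop rather than the markWord value class, so these hunks move the file back to the older spelling. The older declaration, approximately as it appears in oopsHierarchy.hpp (a sketch from memory, not copied from this patch):

    class markOopDesc;
    typedef class markOopDesc* markOop;  // JDK 14 and later replace this
                                         // pointer typedef with the markWord
                                         // value class

Because markOop is just a pointer, header state is queried through markOopDesc member functions (m->must_be_preserved(p), markOopDesc::prototype()) rather than markWord value methods, which is exactly the pattern the remaining hunks restore.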

@@ -1525,12 +1521,10 @@
 void CMSCollector::compute_new_size() {
   assert_locked_or_safepoint(Heap_lock);
   FreelistLocker z(this);
   MetaspaceGC::compute_new_size();
   _cmsGen->compute_new_size_free_list();
-  // recalculate CMS used space after CMS collection
-  _cmsGen->cmsSpace()->recalculate_used_stable();
 }
 
 // A work method used by the foreground collector to do
 // a mark-sweep-compact.
 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {

@@ -2055,11 +2049,10 @@
 
 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
 
   _capacity_at_prologue = capacity();
   _used_at_prologue = used();
-  _cmsSpace->recalculate_used_stable();
 
   // We enable promotion tracking so that card-scanning can recognize
   // which objects have been promoted during this GC and skip them.
   for (uint i = 0; i < ParallelGCThreads; i++) {
     _par_gc_thread_states[i]->promo.startTrackingPromotions();

@@ -2128,11 +2121,10 @@
   }
   // reset _eden_chunk_array so sampling starts afresh
   _eden_chunk_index = 0;
 
   size_t cms_used   = _cmsGen->cmsSpace()->used();
-  _cmsGen->cmsSpace()->recalculate_used_stable();
 
   // update performance counters - this uses a special version of
   // update_counters() that allows the utilization to be passed as a
   // parameter, avoiding multiple calls to used().
   //
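
The comment above records a small but deliberate design point: used() on the CMS space can be expensive, so its result is computed once into cms_used and handed to the counter-update path instead of being recomputed by each consumer. A minimal self-contained sketch of that shape; GenCounters, post_gc_counter_update, and the trivial bodies are illustrative, not HotSpot names:

    #include <cstddef>

    struct GenCounters {
      // Potentially expensive occupancy query (imagine a free-list walk).
      size_t used() const { return 0; }
      // Special version that takes the utilization as a parameter.
      void update_counters(size_t used_bytes) { (void)used_bytes; }
    };

    void post_gc_counter_update(GenCounters& gen) {
      const size_t cms_used = gen.used();  // one call ...
      gen.update_counters(cms_used);       // ... value reused everywhere
    }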

@@ -2822,12 +2814,10 @@
     checkpointRootsInitialWork();
     // enable ("weak") refs discovery
     rp->enable_discovery();
     _collectorState = Marking;
   }
-
-  _cmsGen->cmsSpace()->recalculate_used_stable();
 }
 
 void CMSCollector::checkpointRootsInitialWork() {
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   assert(_collectorState == InitialMarking, "just checking");

@@ -4185,11 +4175,10 @@
     }
     FreelistLocker x(this);
     MutexLocker y(bitMapLock(),
                   Mutex::_no_safepoint_check_flag);
     checkpointRootsFinalWork();
-    _cmsGen->cmsSpace()->recalculate_used_stable();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
 }
 

@@ -4259,10 +4248,11 @@
   verify_overflow_empty();
 
   if (should_unload_classes()) {
     heap->prune_scavengable_nmethods();
   }
+  JvmtiExport::gc_epilogue();
 
   // If we encountered any (marking stack / work queue) overflow
   // events during the current CMS cycle, take appropriate
   // remedial measures, where possible, so as to try and avoid
   // recurrence of that condition.

@@ -5345,18 +5335,13 @@
     // We need all the free list locks to make the abstract state
     // transition from Sweeping to Resetting. See detailed note
     // further below.
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
-
       // Update heap occupancy information which is used as
       // input to soft ref clearing policy at the next gc.
       Universe::update_heap_info_at_gc();
-
-      // recalculate CMS used space after CMS collection
-      _cmsGen->cmsSpace()->recalculate_used_stable();
-
       _collectorState = Resizing;
     }
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
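
The block above performs the phase change away from Sweeping while holding the CMS token and the free list locks, so threads that read collector state under those locks can never observe the heap-occupancy update and the _collectorState flip out of order. A hedged, portable sketch of the same pattern; std::scoped_lock stands in for the HotSpot-specific CMSTokenSyncWithLocks, and every name here is illustrative:

    #include <mutex>

    enum class Phase { Sweeping, Resizing, Resetting };

    std::mutex freelist_lock;           // taken by allocators
    std::mutex state_lock;              // taken by state readers
    Phase collector_state = Phase::Sweeping;

    void finish_sweep_phase() {
      // Hold every lock the readers use, so the bookkeeping and the
      // phase flip appear atomic to them.
      std::scoped_lock guard(freelist_lock, state_lock);
      // ... update heap occupancy bookkeeping here ...
      collector_state = Phase::Resizing;  // mirrors _collectorState = Resizing
    }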

@@ -5441,11 +5426,10 @@
   bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
   if (!full && current_is_young) {
     // Gather statistics on the young generation collection.
     collector()->stats().record_gc0_end(used());
   }
-  _cmsSpace->recalculate_used_stable();
 }
 
 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
   // We iterate over the space(s) underlying this generation,
   // checking the mark bit map to see if the bits corresponding

@@ -6984,11 +6968,11 @@
                        !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
-  bml->lock_without_safepoint_check();
+  bml->lock();
 
   _collector->startTimer();
 }
 
 bool CMSPrecleanRefsYieldClosure::should_return() {

@@ -7791,14 +7775,14 @@
 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
   assert(stack->isEmpty(), "Expected precondition");
   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
   size_t i = num;
   oop  cur = _overflow_list;
-  const markWord proto = markWord::prototype();
+  const markOop proto = markOopDesc::prototype();
   NOT_PRODUCT(ssize_t n = 0;)
   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
-    next = oop(cur->mark_raw().to_pointer());
+    next = oop(cur->mark_raw());
     cur->set_mark_raw(proto);   // until proven otherwise
     assert(oopDesc::is_oop(cur), "Should be an oop");
     bool res = stack->push(cur);
     assert(res, "Bit off more than can chew?");
     NOT_PRODUCT(n++;)
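
The loop above treats the overflow list as an intrusive singly linked list: each object's next link is stored in its own mark word, and every object taken off the list has its header reset to the prototype value ("until proven otherwise"). A standalone sketch of that shape with plain pointers standing in for mark words; Node, take_up_to, and push are illustrative names:

    #include <cstddef>

    struct Node {
      Node* header;  // plays the role of a mark word reused as a next link
    };

    // Pop up to num nodes from an intrusive list whose links live in the
    // nodes' header field, restoring each header to proto as we go.
    void take_up_to(Node*& list_head, size_t num, Node* proto,
                    void (*push)(Node*)) {
      Node* cur = list_head;
      for (size_t i = num; i > 0 && cur != nullptr; i--) {
        Node* next = cur->header;  // the next link hides in the header
        cur->header = proto;       // "until proven otherwise"
        push(cur);
        cur = next;
      }
      list_head = cur;  // whatever we did not take stays on the list
    }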

@@ -7878,24 +7862,24 @@
   }
   assert(prefix != NULL && prefix != BUSY, "Error");
   size_t i = num;
   oop cur = prefix;
   // Walk down the first "num" objects, unless we reach the end.
-  for (; i > 1 && cur->mark_raw().to_pointer() != NULL; cur = oop(cur->mark_raw().to_pointer()), i--);
-  if (cur->mark_raw().to_pointer() == NULL) {
+  for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--);
+  if (cur->mark_raw() == NULL) {
     // We have "num" or fewer elements in the list, so there
     // is nothing to return to the global list.
     // Write back the NULL in lieu of the BUSY we wrote
     // above, if it is still the same value.
     if (_overflow_list == BUSY) {
       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
     }
   } else {
     // Chop off the suffix and return it to the global list.
-    assert(cur->mark_raw().to_pointer() != (void*)BUSY, "Error");
-    oop suffix_head = oop(cur->mark_raw().to_pointer()); // suffix will be put back on global list
-    cur->set_mark_raw(markWord::from_pointer(NULL));     // break off suffix
+    assert(cur->mark_raw() != BUSY, "Error");
+    oop suffix_head = cur->mark_raw(); // suffix will be put back on global list
+    cur->set_mark_raw(NULL);           // break off suffix
     // It's possible that the list is still in the empty(busy) state
     // we left it in a short while ago; in that case we may be
     // able to place back the suffix without incurring the cost
     // of a walk down the list.
     oop observed_overflow_list = _overflow_list;

@@ -7911,22 +7895,22 @@
     }
     if (!attached) {
       // Too bad, someone else sneaked in (at least) an element; we'll need
       // to do a splice. Find tail of suffix so we can prepend suffix to global
       // list.
-      for (cur = suffix_head; cur->mark_raw().to_pointer() != NULL; cur = (oop)(cur->mark_raw().to_pointer()));
+      for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw()));
       oop suffix_tail = cur;
-      assert(suffix_tail != NULL && suffix_tail->mark_raw().to_pointer() == NULL,
+      assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL,
              "Tautology");
       observed_overflow_list = _overflow_list;
       do {
         cur_overflow_list = observed_overflow_list;
         if (cur_overflow_list != BUSY) {
           // Do the splice ...
-          suffix_tail->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
+          suffix_tail->set_mark_raw(markOop(cur_overflow_list));
         } else { // cur_overflow_list == BUSY
-          suffix_tail->set_mark_raw(markWord::from_pointer(NULL));
+          suffix_tail->set_mark_raw(NULL);
         }
         // ... and try to place spliced list back on overflow_list ...
         observed_overflow_list =
           Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
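
This retry loop, like the one in par_push_on_overflow_list further down, is a classic compare-exchange publish: rebuild the link to the head you observed, attempt the swap, and on failure retry against the freshly observed head, treating the BUSY sentinel as "list temporarily claimed by another thread". A hedged sketch with std::atomic in place of Atomic::cmpxchg; Obj, BUSY, and splice_prefix are illustrative:

    #include <atomic>

    struct Obj { Obj* next; };

    std::atomic<Obj*> overflow_list{nullptr};
    Obj* const BUSY = reinterpret_cast<Obj*>(0x1);  // sentinel, never dereferenced

    // Prepend the chain [head..tail] onto the shared list. On failure the
    // compare-exchange reloads the observed head and the loop rebuilds the
    // link; a BUSY head means the chain is published NULL-terminated.
    void splice_prefix(Obj* head, Obj* tail) {
      Obj* observed = overflow_list.load();
      do {
        tail->next = (observed == BUSY) ? nullptr : observed;
      } while (!overflow_list.compare_exchange_weak(observed, head));
    }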

@@ -7934,15 +7918,15 @@
     }
   }
 
   // Push the prefix elements on work_q
   assert(prefix != NULL, "control point invariant");
-  const markWord proto = markWord::prototype();
+  const markOop proto = markOopDesc::prototype();
   oop next;
   NOT_PRODUCT(ssize_t n = 0;)
   for (cur = prefix; cur != NULL; cur = next) {
-    next = oop(cur->mark_raw().to_pointer());
+    next = oop(cur->mark_raw());
     cur->set_mark_raw(proto);   // until proven otherwise
     assert(oopDesc::is_oop(cur), "Should be an oop");
     bool res = work_q->push(cur);
     assert(res, "Bit off more than we can chew?");
     NOT_PRODUCT(n++;)

@@ -7957,11 +7941,11 @@
 // Single-threaded
 void CMSCollector::push_on_overflow_list(oop p) {
   NOT_PRODUCT(_num_par_pushes++;)
   assert(oopDesc::is_oop(p), "Not an oop");
   preserve_mark_if_necessary(p);
-  p->set_mark_raw(markWord::from_pointer(_overflow_list));
+  p->set_mark_raw((markOop)_overflow_list);
   _overflow_list = p;
 }
 
 // Multi-threaded; use CAS to prepend to overflow list
 void CMSCollector::par_push_on_overflow_list(oop p) {

@@ -7971,13 +7955,13 @@
   oop observed_overflow_list = _overflow_list;
   oop cur_overflow_list;
   do {
     cur_overflow_list = observed_overflow_list;
     if (cur_overflow_list != BUSY) {
-      p->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
+      p->set_mark_raw(markOop(cur_overflow_list));
     } else {
-      p->set_mark_raw(markWord::from_pointer(NULL));
+      p->set_mark_raw(NULL);
     }
     observed_overflow_list =
       Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
   } while (cur_overflow_list != observed_overflow_list);
 }

@@ -7995,29 +7979,29 @@
 // an indication of success or failure with the assumption that
 // the caller may be able to recover from a failure; code in
 // the VM can then be changed, incrementally, to deal with such
 // failures where possible, thus, incrementally hardening the VM
 // in such low resource situations.
-void CMSCollector::preserve_mark_work(oop p, markWord m) {
+void CMSCollector::preserve_mark_work(oop p, markOop m) {
   _preserved_oop_stack.push(p);
   _preserved_mark_stack.push(m);
   assert(m == p->mark_raw(), "Mark word changed");
   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
          "bijection");
 }
 
 // Single threaded
 void CMSCollector::preserve_mark_if_necessary(oop p) {
-  markWord m = p->mark_raw();
-  if (p->mark_must_be_preserved(m)) {
+  markOop m = p->mark_raw();
+  if (m->must_be_preserved(p)) {
     preserve_mark_work(p, m);
   }
 }
 
 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
-  markWord m = p->mark_raw();
-  if (p->mark_must_be_preserved(m)) {
+  markOop m = p->mark_raw();
+  if (m->must_be_preserved(p)) {
     MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
     // Even though we read the mark word without holding
     // the lock, we are assured that it will not change
     // because we "own" this oop, so no other thread can
     // be trying to push it on the overflow list; see

@@ -8053,13 +8037,13 @@
 
   while (!_preserved_oop_stack.is_empty()) {
     oop p = _preserved_oop_stack.pop();
     assert(oopDesc::is_oop(p), "Should be an oop");
     assert(_span.contains(p), "oop should be in _span");
-    assert(p->mark_raw() == markWord::prototype(),
+    assert(p->mark_raw() == markOopDesc::prototype(),
            "Set when taken from overflow list");
-    markWord m = _preserved_mark_stack.pop();
+    markOop m = _preserved_mark_stack.pop();
     p->set_mark_raw(m);
   }
   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
          "stacks were cleared above");
 }
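
The final two hunks are the two halves of mark preservation: before a header is reused as an overflow-list link, any mark that still carries real state (a hash code, lock bits) is saved as an (oop, mark) pair on two parallel stacks, and this restore loop pops the pairs back; the size assertion enforces the pairing. A standalone sketch of the save/restore discipline; Obj, PROTOTYPE, and the vectors are illustrative stand-ins for the HotSpot stacks:

    #include <cassert>
    #include <vector>

    struct Obj { long header; };
    const long PROTOTYPE = 0x1;  // stand-in for markOopDesc::prototype()

    std::vector<Obj*> preserved_objs;   // cf. _preserved_oop_stack
    std::vector<long> preserved_marks;  // cf. _preserved_mark_stack

    // Save the header only when the prototype would lose information;
    // a simplified stand-in for must_be_preserved().
    void preserve_if_necessary(Obj* p) {
      if (p->header != PROTOTYPE) {
        preserved_objs.push_back(p);
        preserved_marks.push_back(p->header);
      }
    }

    void restore_preserved_marks() {
      assert(preserved_objs.size() == preserved_marks.size());  // bijection
      while (!preserved_objs.empty()) {
        Obj* p = preserved_objs.back();
        preserved_objs.pop_back();
        assert(p->header == PROTOTYPE);  // set when taken from overflow list
        p->header = preserved_marks.back();
        preserved_marks.pop_back();
      }
    }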