src/share/vm/gc_implementation/g1/satbQueue.cpp
@@ -23,10 +23,11 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/satbQueue.hpp"
+#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
@@ -74,34 +75,33 @@
// The stale reference cases are implicitly handled by the NTAMS
// comparison. Because of the possibility of stale references, buffer
// processing must be somewhat circumspect and not assume entries
// in an unfiltered buffer refer to valid objects.
-inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
-  // Includes rejection of NULL pointers.
-  assert(heap->is_in_reserved(entry),
-         err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)));
-
-  HeapRegion* region = heap->heap_region_containing_raw(entry);
-  assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry)));
-  if (entry >= region->next_top_at_mark_start()) {
-    return false;
-  }
-
-  assert(((oop)entry)->is_oop(true /* ignore mark word */),
-         err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)));
+template <class HeapType>
+inline bool requires_marking(const void* entry, HeapType* heap) {
+  return heap->requires_marking(entry);
+}
-  return true;
+void ObjPtrQueue::filter() {
+  if (UseG1GC) {
+    filter_impl<G1CollectedHeap>();
+  } else if (UseShenandoahGC) {
+    filter_impl<ShenandoahHeap>();
+  } else {
+    ShouldNotReachHere();
+  }
}
// This method removes entries from a SATB buffer that will not be
// useful to the concurrent marking threads. Entries are retained if
// they require marking and are not already marked. Retained entries
// are compacted toward the top of the buffer.
-void ObjPtrQueue::filter() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+template <class HeapType>
+void ObjPtrQueue::filter_impl() {
+  HeapType* heap = (HeapType*) Universe::heap();
  void** buf = _buf;
  size_t sz = _sz;
  if (buf == NULL) {
    // nothing to do
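
Note for readers of this hunk: the template above assumes each heap class now
exposes a requires_marking(const void*) member, which this file does not show.
The following is only a plausible sketch of the G1 side, reconstructed from
the free-function logic deleted above plus the isMarkedNext() check that
disappears from the filter call site below; the exact name and placement are
assumptions, not part of this patch.

// Sketch, not patch code: assumed G1CollectedHeap::requires_marking(),
// folding in the old free function and the already-marked check.
bool G1CollectedHeap::requires_marking(const void* entry) {
  // Includes rejection of NULL pointers.
  assert(is_in_reserved(entry),
         err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)));

  HeapRegion* region = heap_region_containing_raw(entry);
  assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry)));
  if (entry >= region->next_top_at_mark_start()) {
    return false;  // stale reference: allocated after mark start
  }

  assert(((oop)entry)->is_oop(true /* ignore mark word */),
         err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)));

  // Entries already marked need no further SATB processing.
  return !isMarkedNext((oop)entry);
}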
@@ -124,11 +124,11 @@
    // at the end. If we are going to retain it we will copy it to its
    // final place. If we have retained all entries we have visited so
    // far, we'll just end up copying it to the same place.
    *p = NULL;
-    if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
+    if (requires_marking(entry, heap)) {
      assert(new_index > 0, "we should not have already filled up the buffer");
      new_index -= oopSize;
      assert(new_index >= i,
             "new_index should never be below i, as we always compact 'up'");
      void** new_p = &buf[byte_index_to_index((int) new_index)];
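
The hunk shows only the middle of the filtering loop. As a reading aid, here
is a minimal standalone model of the "compact up" scheme that the comments
and the new_index >= i assert describe. It is a simplification (entry indices
instead of byte offsets, a retain callback instead of requires_marking, and
it walks the whole array rather than [_index, sz)), not the patch's code.

// Standalone model of the compaction: walk the buffer from the top down,
// NULL every slot, and copy retained entries toward the high end. Because
// new_index only decreases in iterations where i also decreases, new_index
// >= i always holds, so a retained entry is never written over a slot that
// has not been read yet.
static size_t compact_up(void** buf, size_t entries, bool (*retain)(void*)) {
  size_t i = entries;
  size_t new_index = entries;
  while (i > 0) {
    i--;
    void* entry = buf[i];
    buf[i] = NULL;                 // unused low slots end up NULL
    if (entry != NULL && retain(entry)) {
      new_index--;                 // claim the next free slot from the top
      buf[new_index] = entry;      // same slot if everything so far was kept
    }
  }
  return new_index;                // becomes the new _index (first live entry)
}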
@@ -175,10 +175,26 @@
  size_t sz = _sz;
  size_t all_entries = sz / oopSize;
  size_t retained_entries = (sz - _index) / oopSize;
  size_t perc = retained_entries * 100 / all_entries;
  bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
+
+  if (UseShenandoahGC) {
+    Thread* t = Thread::current();
+    if (t->is_force_satb_flush()) {
+      if (!should_enqueue && sz != _index) {
+        // The buffer is non-empty after compaction, and we decided not to
+        // enqueue it. Shenandoah still wants to know about the leftover work
+        // in that buffer eventually. Enqueueing it now avoids dealing with
+        // these leftovers during final-mark, after the buffers are drained
+        // completely.
+        // TODO: This can be extended to handle G1 too.
+        should_enqueue = true;
+      }
+      t->set_force_satb_flush(false);
+    }
+  }
+
  return should_enqueue;
}
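
For concreteness, here is the enqueueing decision with illustrative numbers;
the buffer size and fill level are assumed values, with oopSize == 8 on a
64-bit VM and the threshold at what I believe is its default of 60.

// Example: _sz == 1024 bytes, _index == 384 after filtering.
//   all_entries      = 1024 / 8         = 128
//   retained_entries = (1024 - 384) / 8 = 80
//   perc             = 80 * 100 / 128   = 62
// With G1SATBBufferEnqueueingThresholdPercent == 60, 62 > 60 holds and the
// buffer is enqueued for the marking threads. Under Shenandoah, even a
// below-threshold buffer is enqueued when the thread's force-satb-flush flag
// is set, so leftover work reaches the collector before final-mark instead
// of being discovered there.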
void ObjPtrQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(),