src/hotspot/share/gc/z/zHeap.cpp

@@ -77,11 +77,11 @@
   // Install global heap instance
   assert(_heap == NULL, "Already initialized");
   _heap = this;
 
   // Update statistics
-  ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
+  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
 }
 
 size_t ZHeap::heap_min_size() const {
   return MinHeapSize;
 }

@@ -111,12 +111,12 @@
 
 size_t ZHeap::max_capacity() const {
   return _page_allocator.max_capacity();
 }
 
-size_t ZHeap::soft_max_capacity() const {
-  return _page_allocator.soft_max_capacity();
+size_t ZHeap::current_max_capacity() const {
+  return _page_allocator.current_max_capacity();
 }
 
 size_t ZHeap::capacity() const {
   return _page_allocator.capacity();
 }

@@ -175,21 +175,17 @@
 
   return MIN2(size, max_tlab_size());
 }
 
 bool ZHeap::is_in(uintptr_t addr) const {
-  // An address is considered to be "in the heap" if it points into
-  // the allocated part of a pages, regardless of which heap view is
-  // used. Note that an address with the finalizable metadata bit set
-  // is not pointing into a heap view, and therefore not considered
-  // to be "in the heap".
-
-  if (ZAddress::is_in(addr)) {
-    const ZPage* const page = _page_table.get(addr);
-    if (page != NULL) {
-      return page->is_in(addr);
-    }
+  if (addr < ZAddressReservedStart || addr >= ZAddressReservedEnd) {
+    return false;
+  }
+
+  const ZPage* const page = _page_table.get(addr);
+  if (page != NULL) {
+    return page->is_in(addr);
   }
 
   return false;
 }
 

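The reworked is_in() uses a common two-step membership test: a cheap bounds check against the reserved heap address range (ZAddressReservedStart/ZAddressReservedEnd), then a page-table lookup, with the address counting as "in the heap" only if it falls in the allocated part of its page. A minimal standalone sketch of the same pattern follows; Page, PageTable, heap_is_in and the fixed 2M page granule are simplified stand-ins, not the real ZGC types.

  #include <cstdint>
  #include <unordered_map>

  // Simplified stand-ins for ZPage and ZPageTable, only to illustrate the
  // "range check, then page-table lookup" shape of the patched ZHeap::is_in().
  struct Page {
    uintptr_t start;
    uintptr_t top;  // end of the allocated part of the page
    bool is_in(uintptr_t addr) const { return addr >= start && addr < top; }
  };

  struct PageTable {
    static constexpr uintptr_t page_size = 2 * 1024 * 1024;  // assumed granule
    std::unordered_map<uintptr_t, Page> pages;               // keyed by page start
    const Page* get(uintptr_t addr) const {
      const auto it = pages.find(addr & ~(page_size - 1));
      return it != pages.end() ? &it->second : nullptr;
    }
  };

  // Mirrors the patched ZHeap::is_in(): reject addresses outside the reserved
  // range, then ask the owning page whether the address is in its allocated part.
  bool heap_is_in(uintptr_t addr, uintptr_t reserved_start, uintptr_t reserved_end,
                  const PageTable& table) {
    if (addr < reserved_start || addr >= reserved_end) {
      return false;
    }
    const Page* const page = table.get(addr);
    if (page != nullptr) {
      return page->is_in(addr);
    }
    return false;
  }
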
@@ -315,24 +311,61 @@
 
   // Reset marking information and mark roots
   _mark.start();
 
   // Update statistics
-  ZStatHeap::set_at_mark_start(soft_max_capacity(), capacity(), used());
+  ZStatHeap::set_at_mark_start(capacity(), used());
 }
 
 void ZHeap::mark(bool initial) {
   _mark.mark(initial);
 }
 
 void ZHeap::mark_flush_and_free(Thread* thread) {
   _mark.flush_and_free(thread);
 }
 
+class ZFixupPartialLoadsClosure : public ZRootsIteratorClosure {
+public:
+  virtual void do_oop(oop* p) {
+    ZBarrier::mark_barrier_on_root_oop_field(p);
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    ShouldNotReachHere();
+  }
+};
+
+class ZFixupPartialLoadsTask : public ZTask {
+private:
+  ZThreadRootsIterator _thread_roots;
+
+public:
+  ZFixupPartialLoadsTask() :
+      ZTask("ZFixupPartialLoadsTask"),
+      _thread_roots() {}
+
+  virtual void work() {
+    ZFixupPartialLoadsClosure cl;
+    _thread_roots.oops_do(&cl);
+  }
+};
+
+void ZHeap::fixup_partial_loads() {
+  ZFixupPartialLoadsTask task;
+  _workers.run_parallel(&task);
+}
+
 bool ZHeap::mark_end() {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 
+  // C2 can generate code where a safepoint poll is inserted
+  // between a load and the associated load barrier. To handle
+  // this case we need to rescan the thread stack here to make
+  // sure such oops are marked.
+  fixup_partial_loads();
+
   // Try end marking
   if (!_mark.end()) {
     // Marking not completed, continue concurrent mark
     return false;
   }

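The new fixup_partial_loads() follows ZGC's usual closure-plus-task structure: a ZRootsIteratorClosure that applies the mark barrier to each root slot, wrapped in a ZTask that the worker threads run over the thread stacks at the mark-end safepoint. That way an oop loaded just before a safepoint poll, whose load barrier never executed, is still marked. Below is a small standalone sketch of that structure, assuming hypothetical RootClosure, Task, run_parallel and mark_barrier_on_root stand-ins rather than the HotSpot types.

  #include <cstdint>
  #include <thread>
  #include <vector>

  // Placeholder for ZBarrier::mark_barrier_on_root_oop_field(): in the real code
  // this marks (and, if needed, heals) the object the root slot points to.
  static void mark_barrier_on_root(uintptr_t* /* p */) { /* mark + heal the slot */ }

  // Stand-in for ZRootsIteratorClosure: one virtual callback per root slot.
  struct RootClosure {
    virtual void do_oop(uintptr_t* p) = 0;
    virtual ~RootClosure() {}
  };

  struct FixupPartialLoadsClosure : RootClosure {
    virtual void do_oop(uintptr_t* p) { mark_barrier_on_root(p); }
  };

  // Stand-in for ZTask: a unit of work handed to a worker pool.
  struct Task {
    virtual void work() = 0;
    virtual ~Task() {}
  };

  struct FixupPartialLoadsTask : Task {
    std::vector<uintptr_t*> thread_roots;  // stand-in for ZThreadRootsIterator
    virtual void work() {
      FixupPartialLoadsClosure cl;
      for (uintptr_t* p : thread_roots) {
        cl.do_oop(p);  // re-apply the mark barrier to each stack root
      }
    }
  };

  // Stand-in for ZWorkers::run_parallel(); a single helper thread instead of
  // the GC worker gang, since this only shows the call structure.
  static void run_parallel(Task* task) {
    std::thread worker([task]() { task->work(); });
    worker.join();
  }

In the patch itself, ZHeap::fixup_partial_loads() simply constructs the task and hands it to _workers.run_parallel(&task), as in the hunk above.
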
@@ -469,12 +502,12 @@
 }
 
 void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 
-  ZHeapIterator iter;
-  iter.objects_do(cl, visit_referents);
+  ZHeapIterator iter(visit_referents);
+  iter.objects_do(cl);
 }
 
 void ZHeap::serviceability_initialize() {
   _serviceability.initialize();
 }
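
The last hunk only moves the visit_referents flag from ZHeapIterator::objects_do() to the ZHeapIterator constructor, so the iterator is configured once and objects_do() takes just the closure. A rough standalone sketch of that API shape, using hypothetical Object, ObjectVisitor and HeapIterator types in place of HotSpot's oop and ObjectClosure:

  #include <cstdio>

  struct Object { int id; };

  // Stand-in for HotSpot's ObjectClosure: one callback per live object.
  struct ObjectVisitor {
    virtual void do_object(Object* obj) = 0;
    virtual ~ObjectVisitor() {}
  };

  class HeapIterator {
  private:
    const bool _visit_referents;  // fixed at construction, as in the patched code

  public:
    explicit HeapIterator(bool visit_referents) : _visit_referents(visit_referents) {}

    void objects_do(ObjectVisitor* cl) {
      static Object heap[] = { {1}, {2}, {3} };  // stand-in for the real heap walk
      for (Object& obj : heap) {
        cl->do_object(&obj);
        if (_visit_referents) {
          // A real iterator would additionally follow Reference referents here.
        }
      }
    }
  };

  struct PrintVisitor : ObjectVisitor {
    virtual void do_object(Object* obj) { std::printf("object %d\n", obj->id); }
  };

  int main() {
    HeapIterator iter(/* visit_referents */ true);  // was: objects_do(cl, true)
    PrintVisitor cl;
    iter.objects_do(&cl);
    return 0;
  }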