< prev index next >

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

Print this page

  53 #include "gc/shared/gcVMOperations.hpp"
  54 #include "gc/shared/partialArraySplitter.inline.hpp"
  55 #include "gc/shared/partialArrayState.hpp"
  56 #include "gc/shared/partialArrayTaskStats.hpp"
  57 #include "gc/shared/referencePolicy.hpp"
  58 #include "gc/shared/suspendibleThreadSet.hpp"
  59 #include "gc/shared/taskqueue.inline.hpp"
  60 #include "gc/shared/taskTerminator.hpp"
  61 #include "gc/shared/weakProcessor.inline.hpp"
  62 #include "gc/shared/workerPolicy.hpp"
  63 #include "jvm.h"
  64 #include "logging/log.hpp"
  65 #include "memory/allocation.hpp"
  66 #include "memory/iterator.hpp"
  67 #include "memory/metaspaceUtils.hpp"
  68 #include "memory/resourceArea.hpp"
  69 #include "memory/universe.hpp"
  70 #include "nmt/memTracker.hpp"
  71 #include "oops/access.inline.hpp"
  72 #include "oops/oop.inline.hpp"

  73 #include "runtime/globals_extension.hpp"
  74 #include "runtime/handles.inline.hpp"
  75 #include "runtime/java.hpp"
  76 #include "runtime/orderAccess.hpp"
  77 #include "runtime/os.hpp"
  78 #include "runtime/prefetch.inline.hpp"
  79 #include "runtime/threads.hpp"
  80 #include "utilities/align.hpp"
  81 #include "utilities/checkedCast.hpp"
  82 #include "utilities/formatBuffer.hpp"
  83 #include "utilities/growableArray.hpp"
  84 #include "utilities/powerOfTwo.hpp"
  85 
// Default constructor: creates a closure with no G1ConcurrentMark attached;
// initialize() below installs one later.
G1CMIsAliveClosure::G1CMIsAliveClosure() : _cm(nullptr) { }
  87 
// Construct a closure bound to the given concurrent mark instance.
// The instance must be non-null.
G1CMIsAliveClosure::G1CMIsAliveClosure(G1ConcurrentMark* cm) : _cm(cm) {
  assert(cm != nullptr, "must be");
}
  91 
  92 void G1CMIsAliveClosure::initialize(G1ConcurrentMark* cm) {

2327     target_size = 0;
2328   }
2329 
2330   if (_task_queue->size() > target_size) {
2331     G1TaskQueueEntry entry;
2332     bool ret = _task_queue->pop_local(entry);
2333     while (ret) {
2334       process_entry(entry, false /* stolen */);
2335       if (_task_queue->size() <= target_size || has_aborted()) {
2336         ret = false;
2337       } else {
2338         ret = _task_queue->pop_local(entry);
2339       }
2340     }
2341   }
2342 }
2343 
2344 size_t G1CMTask::start_partial_array_processing(oop obj) {
2345   assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());
2346 
2347   objArrayOop obj_array = objArrayOop(obj);
2348   size_t array_length = obj_array->length();
2349 
2350   size_t initial_chunk_size = _partial_array_splitter.start(_task_queue, obj_array, nullptr, array_length);
2351 
2352   // Mark objArray klass metadata
2353   if (_cm_oop_closure->do_metadata()) {
2354     _cm_oop_closure->do_klass(obj_array->klass());
2355   }
2356 
2357   process_array_chunk(obj_array, 0, initial_chunk_size);
2358 
2359   // Include object header size
2360   return objArrayOopDesc::object_size(checked_cast<int>(initial_chunk_size));





2361 }
2362 
2363 size_t G1CMTask::process_partial_array(const G1TaskQueueEntry& task, bool stolen) {
2364   PartialArrayState* state = task.to_partial_array_state();
2365   // Access state before release by claim().
2366   objArrayOop obj = objArrayOop(state->source());
2367 
2368   PartialArraySplitter::Claim claim =
2369     _partial_array_splitter.claim(state, _task_queue, stolen);
2370 
2371   process_array_chunk(obj, claim._start, claim._end);
2372   return heap_word_size((claim._end - claim._start) * heapOopSize);








2373 }
2374 
2375 void G1CMTask::drain_global_stack(bool partially) {
2376   if (has_aborted()) {
2377     return;
2378   }
2379 
2380   // We have a policy to drain the local queue before we attempt to
2381   // drain the global stack.
2382   assert(partially || _task_queue->size() == 0, "invariant");
2383 
2384   // Decide what the target size is, depending whether we're going to
2385   // drain it partially (so that other tasks can steal if they run out
2386   // of things to do) or totally (at the very end).
2387   // Notice that when draining the global mark stack partially, due to the racyness
2388   // of the mark stack size update we might in fact drop below the target. But,
2389   // this is not a problem.
2390   // In case of total draining, we simply process until the global mark stack is
2391   // totally empty, disregarding the size counter.
2392   if (partially) {

  53 #include "gc/shared/gcVMOperations.hpp"
  54 #include "gc/shared/partialArraySplitter.inline.hpp"
  55 #include "gc/shared/partialArrayState.hpp"
  56 #include "gc/shared/partialArrayTaskStats.hpp"
  57 #include "gc/shared/referencePolicy.hpp"
  58 #include "gc/shared/suspendibleThreadSet.hpp"
  59 #include "gc/shared/taskqueue.inline.hpp"
  60 #include "gc/shared/taskTerminator.hpp"
  61 #include "gc/shared/weakProcessor.inline.hpp"
  62 #include "gc/shared/workerPolicy.hpp"
  63 #include "jvm.h"
  64 #include "logging/log.hpp"
  65 #include "memory/allocation.hpp"
  66 #include "memory/iterator.hpp"
  67 #include "memory/metaspaceUtils.hpp"
  68 #include "memory/resourceArea.hpp"
  69 #include "memory/universe.hpp"
  70 #include "nmt/memTracker.hpp"
  71 #include "oops/access.inline.hpp"
  72 #include "oops/oop.inline.hpp"
  73 #include "oops/oopCast.inline.hpp"
  74 #include "runtime/globals_extension.hpp"
  75 #include "runtime/handles.inline.hpp"
  76 #include "runtime/java.hpp"
  77 #include "runtime/orderAccess.hpp"
  78 #include "runtime/os.hpp"
  79 #include "runtime/prefetch.inline.hpp"
  80 #include "runtime/threads.hpp"
  81 #include "utilities/align.hpp"
  82 #include "utilities/checkedCast.hpp"
  83 #include "utilities/formatBuffer.hpp"
  84 #include "utilities/growableArray.hpp"
  85 #include "utilities/powerOfTwo.hpp"
  86 
// Default constructor: creates a closure with no G1ConcurrentMark attached;
// initialize() below installs one later.
G1CMIsAliveClosure::G1CMIsAliveClosure() : _cm(nullptr) { }
  88 
// Construct a closure bound to the given concurrent mark instance.
// The instance must be non-null.
G1CMIsAliveClosure::G1CMIsAliveClosure(G1ConcurrentMark* cm) : _cm(cm) {
  assert(cm != nullptr, "must be");
}
  92 
  93 void G1CMIsAliveClosure::initialize(G1ConcurrentMark* cm) {

2328     target_size = 0;
2329   }
2330 
2331   if (_task_queue->size() > target_size) {
2332     G1TaskQueueEntry entry;
2333     bool ret = _task_queue->pop_local(entry);
2334     while (ret) {
2335       process_entry(entry, false /* stolen */);
2336       if (_task_queue->size() <= target_size || has_aborted()) {
2337         ret = false;
2338       } else {
2339         ret = _task_queue->pop_local(entry);
2340       }
2341     }
2342   }
2343 }
2344 
2345 size_t G1CMTask::start_partial_array_processing(oop obj) {
2346   assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());
2347 
2348   objArrayOop obj_array = oop_cast<objArrayOop>(obj);
2349   size_t array_length = obj_array->length();
2350 
2351   size_t initial_chunk_size = _partial_array_splitter.start(_task_queue, obj_array, nullptr, array_length);
2352 
2353   // Mark objArray klass metadata
2354   if (_cm_oop_closure->do_metadata()) {
2355     _cm_oop_closure->do_klass(obj_array->klass());
2356   }
2357 
2358   process_array_chunk(obj_array, 0, initial_chunk_size);
2359 
2360   // Include object header size
2361   if (obj_array->is_refArray()) {
2362     return refArrayOopDesc::object_size(checked_cast<int>(initial_chunk_size));
2363   } else {
2364     FlatArrayKlass* fak = FlatArrayKlass::cast(obj_array->klass());
2365     return flatArrayOopDesc::object_size(fak->layout_helper(), checked_cast<int>(initial_chunk_size));
2366   }
2367 }
2368 
2369 size_t G1CMTask::process_partial_array(const G1TaskQueueEntry& task, bool stolen) {
2370   PartialArrayState* state = task.to_partial_array_state();
2371   // Access state before release by claim().
2372   objArrayOop obj = oop_cast<objArrayOop>(state->source());
2373 
2374   PartialArraySplitter::Claim claim =
2375     _partial_array_splitter.claim(state, _task_queue, stolen);
2376 
2377   process_array_chunk(obj, claim._start, claim._end);
2378 
2379   if (obj->is_refArray()) {
2380     return heap_word_size((claim._end - claim._start) * heapOopSize);
2381   } else {
2382     assert(obj->is_flatArray(), "Must be!");
2383     size_t element_byte_size = FlatArrayKlass::cast(obj->klass())->element_byte_size();
2384     size_t nof_elements = claim._end - claim._start;
2385     return heap_word_size(nof_elements * element_byte_size);
2386   }
2387 }
2388 
2389 void G1CMTask::drain_global_stack(bool partially) {
2390   if (has_aborted()) {
2391     return;
2392   }
2393 
2394   // We have a policy to drain the local queue before we attempt to
2395   // drain the global stack.
2396   assert(partially || _task_queue->size() == 0, "invariant");
2397 
2398   // Decide what the target size is, depending whether we're going to
2399   // drain it partially (so that other tasks can steal if they run out
2400   // of things to do) or totally (at the very end).
2401   // Notice that when draining the global mark stack partially, due to the racyness
2402   // of the mark stack size update we might in fact drop below the target. But,
2403   // this is not a problem.
2404   // In case of total draining, we simply process until the global mark stack is
2405   // totally empty, disregarding the size counter.
2406   if (partially) {
< prev index next >