54 #include "gc/shared/gcVMOperations.hpp"
55 #include "gc/shared/partialArraySplitter.inline.hpp"
56 #include "gc/shared/partialArrayState.hpp"
57 #include "gc/shared/partialArrayTaskStats.hpp"
58 #include "gc/shared/referencePolicy.hpp"
59 #include "gc/shared/suspendibleThreadSet.hpp"
60 #include "gc/shared/taskqueue.inline.hpp"
61 #include "gc/shared/taskTerminator.hpp"
62 #include "gc/shared/weakProcessor.inline.hpp"
63 #include "gc/shared/workerPolicy.hpp"
64 #include "jvm.h"
65 #include "logging/log.hpp"
66 #include "memory/allocation.hpp"
67 #include "memory/iterator.hpp"
68 #include "memory/metaspaceUtils.hpp"
69 #include "memory/resourceArea.hpp"
70 #include "memory/universe.hpp"
71 #include "nmt/memTracker.hpp"
72 #include "oops/access.inline.hpp"
73 #include "oops/oop.inline.hpp"
74 #include "runtime/globals_extension.hpp"
75 #include "runtime/handles.inline.hpp"
76 #include "runtime/java.hpp"
77 #include "runtime/orderAccess.hpp"
78 #include "runtime/os.hpp"
79 #include "runtime/prefetch.inline.hpp"
80 #include "runtime/threads.hpp"
81 #include "utilities/align.hpp"
82 #include "utilities/checkedCast.hpp"
83 #include "utilities/formatBuffer.hpp"
84 #include "utilities/growableArray.hpp"
85 #include "utilities/powerOfTwo.hpp"
86
87 G1CMIsAliveClosure::G1CMIsAliveClosure() : _cm(nullptr) { }
88
89 G1CMIsAliveClosure::G1CMIsAliveClosure(G1ConcurrentMark* cm) : _cm(cm) {
90 assert(cm != nullptr, "must be");
91 }
92
93 void G1CMIsAliveClosure::initialize(G1ConcurrentMark* cm) {
2308 target_size = 0;
2309 }
2310
2311 if (_task_queue->size() > target_size) {
2312 G1TaskQueueEntry entry;
2313 bool ret = _task_queue->pop_local(entry);
2314 while (ret) {
2315 process_entry(entry, false /* stolen */);
2316 if (_task_queue->size() <= target_size || has_aborted()) {
2317 ret = false;
2318 } else {
2319 ret = _task_queue->pop_local(entry);
2320 }
2321 }
2322 }
2323 }
2324
2325 size_t G1CMTask::start_partial_array_processing(oop obj) {
2326 assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());
2327
2328 objArrayOop obj_array = objArrayOop(obj);
2329 size_t array_length = obj_array->length();
2330
2331 size_t initial_chunk_size = _partial_array_splitter.start(_task_queue, obj_array, nullptr, array_length);
2332
2333 // Mark objArray klass metadata
2334 if (_cm_oop_closure->do_metadata()) {
2335 _cm_oop_closure->do_klass(obj_array->klass());
2336 }
2337
2338 process_array_chunk(obj_array, 0, initial_chunk_size);
2339
2340 // Include object header size
2341 return objArrayOopDesc::object_size(checked_cast<int>(initial_chunk_size));
2342 }
2343
2344 size_t G1CMTask::process_partial_array(const G1TaskQueueEntry& task, bool stolen) {
2345 PartialArrayState* state = task.to_partial_array_state();
2346 // Access state before release by claim().
2347 objArrayOop obj = objArrayOop(state->source());
2348
2349 PartialArraySplitter::Claim claim =
2350 _partial_array_splitter.claim(state, _task_queue, stolen);
2351
2352 process_array_chunk(obj, claim._start, claim._end);
2353 return heap_word_size((claim._end - claim._start) * heapOopSize);
2354 }
2355
2356 void G1CMTask::drain_global_stack(bool partially) {
2357 if (has_aborted()) {
2358 return;
2359 }
2360
2361 // We have a policy to drain the local queue before we attempt to
2362 // drain the global stack.
2363 assert(partially || _task_queue->size() == 0, "invariant");
2364
2365 // Decide what the target size is, depending whether we're going to
2366 // drain it partially (so that other tasks can steal if they run out
2367 // of things to do) or totally (at the very end).
// Notice that when draining the global mark stack partially, due to the raciness
2369 // of the mark stack size update we might in fact drop below the target. But,
2370 // this is not a problem.
2371 // In case of total draining, we simply process until the global mark stack is
2372 // totally empty, disregarding the size counter.
2373 if (partially) {
|
54 #include "gc/shared/gcVMOperations.hpp"
55 #include "gc/shared/partialArraySplitter.inline.hpp"
56 #include "gc/shared/partialArrayState.hpp"
57 #include "gc/shared/partialArrayTaskStats.hpp"
58 #include "gc/shared/referencePolicy.hpp"
59 #include "gc/shared/suspendibleThreadSet.hpp"
60 #include "gc/shared/taskqueue.inline.hpp"
61 #include "gc/shared/taskTerminator.hpp"
62 #include "gc/shared/weakProcessor.inline.hpp"
63 #include "gc/shared/workerPolicy.hpp"
64 #include "jvm.h"
65 #include "logging/log.hpp"
66 #include "memory/allocation.hpp"
67 #include "memory/iterator.hpp"
68 #include "memory/metaspaceUtils.hpp"
69 #include "memory/resourceArea.hpp"
70 #include "memory/universe.hpp"
71 #include "nmt/memTracker.hpp"
72 #include "oops/access.inline.hpp"
73 #include "oops/oop.inline.hpp"
74 #include "oops/oopCast.inline.hpp"
75 #include "runtime/globals_extension.hpp"
76 #include "runtime/handles.inline.hpp"
77 #include "runtime/java.hpp"
78 #include "runtime/orderAccess.hpp"
79 #include "runtime/os.hpp"
80 #include "runtime/prefetch.inline.hpp"
81 #include "runtime/threads.hpp"
82 #include "utilities/align.hpp"
83 #include "utilities/checkedCast.hpp"
84 #include "utilities/formatBuffer.hpp"
85 #include "utilities/growableArray.hpp"
86 #include "utilities/powerOfTwo.hpp"
87
88 G1CMIsAliveClosure::G1CMIsAliveClosure() : _cm(nullptr) { }
89
90 G1CMIsAliveClosure::G1CMIsAliveClosure(G1ConcurrentMark* cm) : _cm(cm) {
91 assert(cm != nullptr, "must be");
92 }
93
94 void G1CMIsAliveClosure::initialize(G1ConcurrentMark* cm) {
2309 target_size = 0;
2310 }
2311
2312 if (_task_queue->size() > target_size) {
2313 G1TaskQueueEntry entry;
2314 bool ret = _task_queue->pop_local(entry);
2315 while (ret) {
2316 process_entry(entry, false /* stolen */);
2317 if (_task_queue->size() <= target_size || has_aborted()) {
2318 ret = false;
2319 } else {
2320 ret = _task_queue->pop_local(entry);
2321 }
2322 }
2323 }
2324 }
2325
2326 size_t G1CMTask::start_partial_array_processing(oop obj) {
2327 assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());
2328
2329 objArrayOop obj_array = oop_cast<objArrayOop>(obj);
2330 size_t array_length = obj_array->length();
2331
2332 size_t initial_chunk_size = _partial_array_splitter.start(_task_queue, obj_array, nullptr, array_length);
2333
2334 // Mark objArray klass metadata
2335 if (_cm_oop_closure->do_metadata()) {
2336 _cm_oop_closure->do_klass(obj_array->klass());
2337 }
2338
2339 process_array_chunk(obj_array, 0, initial_chunk_size);
2340
2341 // Include object header size
2342 if (obj_array->is_refArray()) {
2343 return refArrayOopDesc::object_size(checked_cast<int>(initial_chunk_size));
2344 } else {
2345 FlatArrayKlass* fak = FlatArrayKlass::cast(obj_array->klass());
2346 return flatArrayOopDesc::object_size(fak->layout_helper(), checked_cast<int>(initial_chunk_size));
2347 }
2348 }
2349
2350 size_t G1CMTask::process_partial_array(const G1TaskQueueEntry& task, bool stolen) {
2351 PartialArrayState* state = task.to_partial_array_state();
2352 // Access state before release by claim().
2353 objArrayOop obj = oop_cast<objArrayOop>(state->source());
2354
2355 PartialArraySplitter::Claim claim =
2356 _partial_array_splitter.claim(state, _task_queue, stolen);
2357
2358 process_array_chunk(obj, claim._start, claim._end);
2359
2360 if (obj->is_refArray()) {
2361 return heap_word_size((claim._end - claim._start) * heapOopSize);
2362 } else {
2363 assert(obj->is_flatArray(), "Must be!");
2364 size_t element_byte_size = FlatArrayKlass::cast(obj->klass())->element_byte_size();
2365 size_t nof_elements = claim._end - claim._start;
2366 return heap_word_size(nof_elements * element_byte_size);
2367 }
2368 }
2369
2370 void G1CMTask::drain_global_stack(bool partially) {
2371 if (has_aborted()) {
2372 return;
2373 }
2374
2375 // We have a policy to drain the local queue before we attempt to
2376 // drain the global stack.
2377 assert(partially || _task_queue->size() == 0, "invariant");
2378
2379 // Decide what the target size is, depending whether we're going to
2380 // drain it partially (so that other tasks can steal if they run out
2381 // of things to do) or totally (at the very end).
// Notice that when draining the global mark stack partially, due to the raciness
2383 // of the mark stack size update we might in fact drop below the target. But,
2384 // this is not a problem.
2385 // In case of total draining, we simply process until the global mark stack is
2386 // totally empty, disregarding the size counter.
2387 if (partially) {
|