24
25 #include "precompiled.hpp"
26
27 #include "compiler/oopMap.hpp"
28 #include "gc/g1/g1CardSetMemory.hpp"
29 #include "gc/g1/g1CardTableEntryClosure.hpp"
30 #include "gc/g1/g1CollectedHeap.inline.hpp"
31 #include "gc/g1/g1CollectionSetCandidates.inline.hpp"
32 #include "gc/g1/g1CollectorState.hpp"
33 #include "gc/g1/g1ConcurrentMark.inline.hpp"
34 #include "gc/g1/g1EvacFailureRegions.inline.hpp"
35 #include "gc/g1/g1EvacInfo.hpp"
36 #include "gc/g1/g1EvacStats.inline.hpp"
37 #include "gc/g1/g1HeapRegion.inline.hpp"
38 #include "gc/g1/g1HeapRegionRemSet.inline.hpp"
39 #include "gc/g1/g1OopClosures.inline.hpp"
40 #include "gc/g1/g1ParScanThreadState.hpp"
41 #include "gc/g1/g1RemSet.hpp"
42 #include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
43 #include "gc/shared/bufferNode.hpp"
44 #include "gc/shared/preservedMarks.inline.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "oops/access.inline.hpp"
47 #include "oops/compressedOops.inline.hpp"
48 #include "oops/oop.inline.hpp"
49 #include "runtime/prefetch.hpp"
50 #include "runtime/threads.hpp"
51 #include "runtime/threadSMR.hpp"
52 #include "utilities/bitMap.inline.hpp"
53 #include "utilities/ticks.hpp"
54
55 class G1PostEvacuateCollectionSetCleanupTask1::MergePssTask : public G1AbstractSubTask {
56 G1ParScanThreadStateSet* _per_thread_states;
57
58 public:
  // Takes the set of per-worker scan thread states to merge; timing for
  // this subtask is recorded under the MergePSS phase.
  MergePssTask(G1ParScanThreadStateSet* per_thread_states) :
    G1AbstractSubTask(G1GCPhaseTimes::MergePSS),
    _per_thread_states(per_thread_states) { }
62
  // Flat cost: a single unit of work regardless of heap size.
  double worker_cost() const override { return 1.0; }
64
234 stat.register_nonempty_chunk();
235
236 size_t num_marked_objs = 0;
237 size_t marked_words = 0;
238
239 HeapWord* obj_addr = first_marked_addr;
240 assert(chunk_start <= obj_addr && obj_addr < chunk_end,
241 "object " PTR_FORMAT " must be within chunk [" PTR_FORMAT ", " PTR_FORMAT "[",
242 p2i(obj_addr), p2i(chunk_start), p2i(chunk_end));
243 do {
244 assert(bitmap->is_marked(obj_addr), "inv");
245 prefetch_obj(obj_addr);
246
247 oop obj = cast_to_oop(obj_addr);
248 const size_t obj_size = obj->size();
249 HeapWord* const obj_end_addr = obj_addr + obj_size;
250
251 {
252 // Process marked object.
253 assert(obj->is_forwarded() && obj->forwardee() == obj, "must be self-forwarded");
254 obj->init_mark();
255 hr->update_bot_for_block(obj_addr, obj_end_addr);
256
257 // Statistics
258 num_marked_objs++;
259 marked_words += obj_size;
260 }
261
262 assert(obj_end_addr <= hr_top, "inv");
263 // Use hr_top as the limit so that we zap dead ranges up to the next
264 // marked obj or hr_top.
265 HeapWord* next_marked_obj_addr = bitmap->get_next_marked_addr(obj_end_addr, hr_top);
266 garbage_words += zap_dead_objects(hr, obj_end_addr, next_marked_obj_addr);
267 obj_addr = next_marked_obj_addr;
268 } while (obj_addr < chunk_end);
269
270 assert(marked_words > 0 && num_marked_objs > 0, "inv");
271
272 stat.register_objects_count_and_size(num_marked_objs, marked_words);
273
274 update_garbage_words_in_hr(hr, garbage_words);
459 g1h->remove_from_old_gen_sets(0, _humongous_regions_reclaimed);
460 g1h->decrement_summary_bytes(_bytes_freed);
461 }
462
  // Flat cost: a single unit of work regardless of heap size.
  double worker_cost() const override { return 1.0; }
464 void do_work(uint worker_id) override {
465 G1CollectedHeap* g1h = G1CollectedHeap::heap();
466
467 G1FreeHumongousRegionClosure cl;
468 g1h->heap_region_iterate(&cl);
469
470 record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumTotal, g1h->num_humongous_objects());
471 record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumCandidates, g1h->num_humongous_reclaim_candidates());
472 record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumReclaimed, cl.humongous_objects_reclaimed());
473
474 _humongous_regions_reclaimed = cl.humongous_regions_reclaimed();
475 _bytes_freed = cl.bytes_freed();
476 }
477 };
478
479 class G1PostEvacuateCollectionSetCleanupTask2::RestorePreservedMarksTask : public G1AbstractSubTask {
480 PreservedMarksSet* _preserved_marks;
481 WorkerTask* _task;
482
483 public:
484 RestorePreservedMarksTask(PreservedMarksSet* preserved_marks) :
485 G1AbstractSubTask(G1GCPhaseTimes::RestorePreservedMarks),
486 _preserved_marks(preserved_marks),
487 _task(preserved_marks->create_task()) { }
488
489 virtual ~RestorePreservedMarksTask() {
490 delete _task;
491 }
492
493 double worker_cost() const override {
494 return _preserved_marks->num();
495 }
496
497 void do_work(uint worker_id) override { _task->work(worker_id); }
498 };
499
500 class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
501 size_t _num_dirtied;
502 G1CollectedHeap* _g1h;
503 G1CardTable* _g1_ct;
504 G1EvacFailureRegions* _evac_failure_regions;
505
  // Maps a card table entry back to the heap region containing the address
  // range that the card covers.
  HeapRegion* region_for_card(CardValue* card_ptr) const {
    return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
  }
509
  bool will_become_free(HeapRegion* hr) const {
    // A region will be freed during the FreeCollectionSet phase if the region is in the
    // collection set and has not had an evacuation failure.
    return _g1h->is_in_cset(hr) && !_evac_failure_regions->contains(hr->hrm_index());
  }
515
516 public:
517 RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h, G1EvacFailureRegions* evac_failure_regions) :
518 G1CardTableEntryClosure(),
519 _num_dirtied(0),
954 }
955
  // One unit of cost per ThreadsPerWorker claimable entries — presumably
  // Java threads handed out by _claimer; confirm against the enclosing task.
  double worker_cost() const override {
    return (double)_claimer.length() / ThreadsPerWorker;
  }
959 };
960
// Assembles the second post-evacuation cleanup batch. Registration order is
// significant: serial subtasks are added first, then the parallel ones.
G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
                                                                                G1EvacInfo* evacuation_info,
                                                                                G1EvacFailureRegions* evac_failure_regions) :
  G1BatchedTask("Post Evacuate Cleanup 2", G1CollectedHeap::heap()->phase_times())
{
#if COMPILER2_OR_JVMCI
  add_serial_task(new UpdateDerivedPointersTask());
#endif
  if (G1CollectedHeap::heap()->has_humongous_reclaim_candidates()) {
    add_serial_task(new EagerlyReclaimHumongousObjectsTask());
  }

  // These subtasks are only needed if at least one region failed evacuation.
  if (evac_failure_regions->has_regions_evac_failed()) {
    add_parallel_task(new RestorePreservedMarksTask(per_thread_states->preserved_marks_set()));
    add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions));
  }
  add_parallel_task(new RedirtyLoggedCardsTask(evac_failure_regions,
                                               per_thread_states->rdc_buffers(),
                                               per_thread_states->num_workers()));

  // TLAB resizing only applies when TLABs are in use and resizing is enabled.
  if (UseTLAB && ResizeTLAB) {
    add_parallel_task(new ResizeTLABsTask());
  }
  add_parallel_task(new FreeCollectionSetTask(evacuation_info,
                                              per_thread_states->surviving_young_words(),
                                              evac_failure_regions));
}
|
24
25 #include "precompiled.hpp"
26
27 #include "compiler/oopMap.hpp"
28 #include "gc/g1/g1CardSetMemory.hpp"
29 #include "gc/g1/g1CardTableEntryClosure.hpp"
30 #include "gc/g1/g1CollectedHeap.inline.hpp"
31 #include "gc/g1/g1CollectionSetCandidates.inline.hpp"
32 #include "gc/g1/g1CollectorState.hpp"
33 #include "gc/g1/g1ConcurrentMark.inline.hpp"
34 #include "gc/g1/g1EvacFailureRegions.inline.hpp"
35 #include "gc/g1/g1EvacInfo.hpp"
36 #include "gc/g1/g1EvacStats.inline.hpp"
37 #include "gc/g1/g1HeapRegion.inline.hpp"
38 #include "gc/g1/g1HeapRegionRemSet.inline.hpp"
39 #include "gc/g1/g1OopClosures.inline.hpp"
40 #include "gc/g1/g1ParScanThreadState.hpp"
41 #include "gc/g1/g1RemSet.hpp"
42 #include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
43 #include "gc/shared/bufferNode.hpp"
44 #include "jfr/jfrEvents.hpp"
45 #include "oops/access.inline.hpp"
46 #include "oops/compressedOops.inline.hpp"
47 #include "oops/oop.inline.hpp"
48 #include "runtime/prefetch.hpp"
49 #include "runtime/threads.hpp"
50 #include "runtime/threadSMR.hpp"
51 #include "utilities/bitMap.inline.hpp"
52 #include "utilities/ticks.hpp"
53
54 class G1PostEvacuateCollectionSetCleanupTask1::MergePssTask : public G1AbstractSubTask {
55 G1ParScanThreadStateSet* _per_thread_states;
56
57 public:
  // Takes the set of per-worker scan thread states to merge; timing for
  // this subtask is recorded under the MergePSS phase.
  MergePssTask(G1ParScanThreadStateSet* per_thread_states) :
    G1AbstractSubTask(G1GCPhaseTimes::MergePSS),
    _per_thread_states(per_thread_states) { }
61
  // Flat cost: a single unit of work regardless of heap size.
  double worker_cost() const override { return 1.0; }
63
233 stat.register_nonempty_chunk();
234
235 size_t num_marked_objs = 0;
236 size_t marked_words = 0;
237
238 HeapWord* obj_addr = first_marked_addr;
239 assert(chunk_start <= obj_addr && obj_addr < chunk_end,
240 "object " PTR_FORMAT " must be within chunk [" PTR_FORMAT ", " PTR_FORMAT "[",
241 p2i(obj_addr), p2i(chunk_start), p2i(chunk_end));
242 do {
243 assert(bitmap->is_marked(obj_addr), "inv");
244 prefetch_obj(obj_addr);
245
246 oop obj = cast_to_oop(obj_addr);
247 const size_t obj_size = obj->size();
248 HeapWord* const obj_end_addr = obj_addr + obj_size;
249
250 {
251 // Process marked object.
252 assert(obj->is_forwarded() && obj->forwardee() == obj, "must be self-forwarded");
253 obj->unset_self_forwarded();
254 hr->update_bot_for_block(obj_addr, obj_end_addr);
255
256 // Statistics
257 num_marked_objs++;
258 marked_words += obj_size;
259 }
260
261 assert(obj_end_addr <= hr_top, "inv");
262 // Use hr_top as the limit so that we zap dead ranges up to the next
263 // marked obj or hr_top.
264 HeapWord* next_marked_obj_addr = bitmap->get_next_marked_addr(obj_end_addr, hr_top);
265 garbage_words += zap_dead_objects(hr, obj_end_addr, next_marked_obj_addr);
266 obj_addr = next_marked_obj_addr;
267 } while (obj_addr < chunk_end);
268
269 assert(marked_words > 0 && num_marked_objs > 0, "inv");
270
271 stat.register_objects_count_and_size(num_marked_objs, marked_words);
272
273 update_garbage_words_in_hr(hr, garbage_words);
458 g1h->remove_from_old_gen_sets(0, _humongous_regions_reclaimed);
459 g1h->decrement_summary_bytes(_bytes_freed);
460 }
461
  // Flat cost: a single unit of work regardless of heap size.
  double worker_cost() const override { return 1.0; }
463 void do_work(uint worker_id) override {
464 G1CollectedHeap* g1h = G1CollectedHeap::heap();
465
466 G1FreeHumongousRegionClosure cl;
467 g1h->heap_region_iterate(&cl);
468
469 record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumTotal, g1h->num_humongous_objects());
470 record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumCandidates, g1h->num_humongous_reclaim_candidates());
471 record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumReclaimed, cl.humongous_objects_reclaimed());
472
473 _humongous_regions_reclaimed = cl.humongous_regions_reclaimed();
474 _bytes_freed = cl.bytes_freed();
475 }
476 };
477
478 class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
479 size_t _num_dirtied;
480 G1CollectedHeap* _g1h;
481 G1CardTable* _g1_ct;
482 G1EvacFailureRegions* _evac_failure_regions;
483
  // Maps a card table entry back to the heap region containing the address
  // range that the card covers.
  HeapRegion* region_for_card(CardValue* card_ptr) const {
    return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
  }
487
  bool will_become_free(HeapRegion* hr) const {
    // A region will be freed during the FreeCollectionSet phase if the region is in the
    // collection set and has not had an evacuation failure.
    return _g1h->is_in_cset(hr) && !_evac_failure_regions->contains(hr->hrm_index());
  }
493
494 public:
495 RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h, G1EvacFailureRegions* evac_failure_regions) :
496 G1CardTableEntryClosure(),
497 _num_dirtied(0),
932 }
933
  // One unit of cost per ThreadsPerWorker claimable entries — presumably
  // Java threads handed out by _claimer; confirm against the enclosing task.
  double worker_cost() const override {
    return (double)_claimer.length() / ThreadsPerWorker;
  }
937 };
938
// Assembles the second post-evacuation cleanup batch. Registration order is
// significant: serial subtasks are added first, then the parallel ones.
G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
                                                                                G1EvacInfo* evacuation_info,
                                                                                G1EvacFailureRegions* evac_failure_regions) :
  G1BatchedTask("Post Evacuate Cleanup 2", G1CollectedHeap::heap()->phase_times())
{
#if COMPILER2_OR_JVMCI
  add_serial_task(new UpdateDerivedPointersTask());
#endif
  if (G1CollectedHeap::heap()->has_humongous_reclaim_candidates()) {
    add_serial_task(new EagerlyReclaimHumongousObjectsTask());
  }

  // This subtask is only needed if at least one region failed evacuation.
  if (evac_failure_regions->has_regions_evac_failed()) {
    add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions));
  }
  add_parallel_task(new RedirtyLoggedCardsTask(evac_failure_regions,
                                               per_thread_states->rdc_buffers(),
                                               per_thread_states->num_workers()));

  // TLAB resizing only applies when TLABs are in use and resizing is enabled.
  if (UseTLAB && ResizeTLAB) {
    add_parallel_task(new ResizeTLABsTask());
  }
  add_parallel_task(new FreeCollectionSetTask(evacuation_info,
                                              per_thread_states->surviving_young_words(),
                                              evac_failure_regions));
}
|