#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1EvacInfo.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
#include "gc/shared/bufferNode.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ticks.hpp"

class G1PostEvacuateCollectionSetCleanupTask1::MergePssTask : public G1AbstractSubTask {
  G1ParScanThreadStateSet* _per_thread_states;

public:
  MergePssTask(G1ParScanThreadStateSet* per_thread_states) :
    G1AbstractSubTask(G1GCPhaseTimes::MergePSS),
    _per_thread_states(per_thread_states) { }

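  // Merging per-thread scan states is quick; a single worker is enough.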
  double worker_cost() const override { return 1.0; }

// ...
    stat.register_nonempty_chunk();

    size_t num_marked_objs = 0;
    size_t marked_words = 0;

    HeapWord* obj_addr = first_marked_addr;
    assert(chunk_start <= obj_addr && obj_addr < chunk_end,
           "object " PTR_FORMAT " must be within chunk [" PTR_FORMAT ", " PTR_FORMAT "[",
           p2i(obj_addr), p2i(chunk_start), p2i(chunk_end));
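    // Walk the marked (i.e. evacuation-failed, self-forwarded) objects in this
    // chunk: reset each object's mark word, update the BOT, and zap the dead
    // ranges between consecutive live objects.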
    do {
      assert(bitmap->is_marked(obj_addr), "inv");
      prefetch_obj(obj_addr);

      oop obj = cast_to_oop(obj_addr);
      const size_t obj_size = obj->size();
      HeapWord* const obj_end_addr = obj_addr + obj_size;

      {
        // Process marked object.
        assert(obj->is_forwarded() && obj->forwardee() == obj, "must be self-forwarded");
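        // Reset the mark word to its default value; it was clobbered by the
        // self-forwarding pointer. Non-default marks were saved in the
        // PreservedMarksSet during evacuation failure handling and are
        // reinstated by RestorePreservedMarksTask.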
        obj->init_mark();
        hr->update_bot_for_block(obj_addr, obj_end_addr);

        // Statistics
        num_marked_objs++;
        marked_words += obj_size;
      }

      assert(obj_end_addr <= hr_top, "inv");
      // Use hr_top as the limit so that we zap dead ranges up to the next
      // marked obj or hr_top.
      HeapWord* next_marked_obj_addr = bitmap->get_next_marked_addr(obj_end_addr, hr_top);
      garbage_words += zap_dead_objects(hr, obj_end_addr, next_marked_obj_addr);
      obj_addr = next_marked_obj_addr;
    } while (obj_addr < chunk_end);

    assert(marked_words > 0 && num_marked_objs > 0, "inv");

    stat.register_objects_count_and_size(num_marked_objs, marked_words);

    update_garbage_words_in_hr(hr, garbage_words);

// ...
    g1h->remove_from_old_gen_sets(0, _humongous_regions_reclaimed);
    g1h->decrement_summary_bytes(_bytes_freed);
  }

  double worker_cost() const override { return 1.0; }
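  // Walk all regions once; dead humongous objects that were identified as
  // reclaim candidates are freed eagerly here.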
  void do_work(uint worker_id) override {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1FreeHumongousRegionClosure cl;
    g1h->heap_region_iterate(&cl);

    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumTotal, g1h->num_humongous_objects());
    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumCandidates, g1h->num_humongous_reclaim_candidates());
    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumReclaimed, cl.humongous_objects_reclaimed());

    _humongous_regions_reclaimed = cl.humongous_regions_reclaimed();
    _bytes_freed = cl.bytes_freed();
  }
};

class G1PostEvacuateCollectionSetCleanupTask2::RestorePreservedMarksTask : public G1AbstractSubTask {
  PreservedMarksSet* _preserved_marks;
  WorkerTask* _task;

public:
  RestorePreservedMarksTask(PreservedMarksSet* preserved_marks) :
    G1AbstractSubTask(G1GCPhaseTimes::RestorePreservedMarks),
    _preserved_marks(preserved_marks),
    _task(preserved_marks->create_task()) { }

  virtual ~RestorePreservedMarksTask() {
    delete _task;
  }

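  // One unit of work per preserved-marks stack (one stack per GC worker).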
  double worker_cost() const override {
    return _preserved_marks->num();
  }

  void do_work(uint worker_id) override { _task->work(worker_id); }
};

class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
  size_t _num_dirtied;
  G1CollectedHeap* _g1h;
  G1CardTable* _g1_ct;
  G1EvacFailureRegions* _evac_failure_regions;

  G1HeapRegion* region_for_card(CardValue* card_ptr) const {
    return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
  }

  bool will_become_free(G1HeapRegion* hr) const {
    // A region will be freed during the FreeCollectionSet phase if it is in the
    // collection set and has not had an evacuation failure.
    return _g1h->is_in_cset(hr) && !_evac_failure_regions->contains(hr->hrm_index());
  }

public:
  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h, G1EvacFailureRegions* evac_failure_regions) :
    G1CardTableEntryClosure(),
    _num_dirtied(0),

// ...
  }

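  // Estimate one worker per ThreadsPerWorker claimed Java threads.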
  double worker_cost() const override {
    return (double)_claimer.length() / ThreadsPerWorker;
  }
};

G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
                                                                                 G1EvacInfo* evacuation_info,
                                                                                 G1EvacFailureRegions* evac_failure_regions) :
  G1BatchedTask("Post Evacuate Cleanup 2", G1CollectedHeap::heap()->phase_times())
{
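  // Derived pointers only exist in code produced by C2 or JVMCI compilers, so
  // the derived pointer table update is needed only in those builds.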
#if COMPILER2_OR_JVMCI
  add_serial_task(new UpdateDerivedPointersTask());
#endif
  if (G1CollectedHeap::heap()->has_humongous_reclaim_candidates()) {
    add_serial_task(new EagerlyReclaimHumongousObjectsTask());
  }

  if (evac_failure_regions->has_regions_evac_failed()) {
    add_parallel_task(new RestorePreservedMarksTask(per_thread_states->preserved_marks_set()));
    add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions));
  }
  add_parallel_task(new RedirtyLoggedCardsTask(evac_failure_regions,
                                               per_thread_states->rdc_buffers(),
                                               per_thread_states->num_workers()));

  if (UseTLAB && ResizeTLAB) {
    add_parallel_task(new ResizeTLABsTask());
  }
  add_parallel_task(new FreeCollectionSetTask(evacuation_info,
                                              per_thread_states->surviving_young_words(),
                                              evac_failure_regions));
}
|
#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1EvacInfo.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
#include "gc/shared/bufferNode.hpp"
#include "jfr/jfrEvents.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ticks.hpp"

class G1PostEvacuateCollectionSetCleanupTask1::MergePssTask : public G1AbstractSubTask {
  G1ParScanThreadStateSet* _per_thread_states;

public:
  MergePssTask(G1ParScanThreadStateSet* per_thread_states) :
    G1AbstractSubTask(G1GCPhaseTimes::MergePSS),
    _per_thread_states(per_thread_states) { }

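  // Merging per-thread scan states is quick; a single worker is enough.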
  double worker_cost() const override { return 1.0; }

// ...
    stat.register_nonempty_chunk();

    size_t num_marked_objs = 0;
    size_t marked_words = 0;

    HeapWord* obj_addr = first_marked_addr;
    assert(chunk_start <= obj_addr && obj_addr < chunk_end,
           "object " PTR_FORMAT " must be within chunk [" PTR_FORMAT ", " PTR_FORMAT "[",
           p2i(obj_addr), p2i(chunk_start), p2i(chunk_end));
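    // Walk the marked (i.e. evacuation-failed, self-forwarded) objects in this
    // chunk: restore each object's mark word, update the BOT, and zap the dead
    // ranges between consecutive live objects.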
    do {
      assert(bitmap->is_marked(obj_addr), "inv");
      prefetch_obj(obj_addr);

      oop obj = cast_to_oop(obj_addr);
      const size_t obj_size = obj->size();
      HeapWord* const obj_end_addr = obj_addr + obj_size;

      {
        // Process marked object.
        assert(obj->is_self_forwarded(), "must be self-forwarded");
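        // Clear the self-forwarded bit in the mark word. The rest of the
        // original mark is kept in place, so no separate preserved-marks
        // restoration is needed.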
        obj->unset_self_forwarded();
        hr->update_bot_for_block(obj_addr, obj_end_addr);

        // Statistics
        num_marked_objs++;
        marked_words += obj_size;
      }

      assert(obj_end_addr <= hr_top, "inv");
      // Use hr_top as the limit so that we zap dead ranges up to the next
      // marked obj or hr_top.
      HeapWord* next_marked_obj_addr = bitmap->get_next_marked_addr(obj_end_addr, hr_top);
      garbage_words += zap_dead_objects(hr, obj_end_addr, next_marked_obj_addr);
      obj_addr = next_marked_obj_addr;
    } while (obj_addr < chunk_end);

    assert(marked_words > 0 && num_marked_objs > 0, "inv");

    stat.register_objects_count_and_size(num_marked_objs, marked_words);

    update_garbage_words_in_hr(hr, garbage_words);

// ...
    g1h->remove_from_old_gen_sets(0, _humongous_regions_reclaimed);
    g1h->decrement_summary_bytes(_bytes_freed);
  }

  double worker_cost() const override { return 1.0; }
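  // Walk all regions once; dead humongous objects that were identified as
  // reclaim candidates are freed eagerly here.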
  void do_work(uint worker_id) override {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1FreeHumongousRegionClosure cl;
    g1h->heap_region_iterate(&cl);

    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumTotal, g1h->num_humongous_objects());
    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumCandidates, g1h->num_humongous_reclaim_candidates());
    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumReclaimed, cl.humongous_objects_reclaimed());

    _humongous_regions_reclaimed = cl.humongous_regions_reclaimed();
    _bytes_freed = cl.bytes_freed();
  }
};

class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
  size_t _num_dirtied;
  G1CollectedHeap* _g1h;
  G1CardTable* _g1_ct;
  G1EvacFailureRegions* _evac_failure_regions;

  G1HeapRegion* region_for_card(CardValue* card_ptr) const {
    return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
  }

  bool will_become_free(G1HeapRegion* hr) const {
    // A region will be freed during the FreeCollectionSet phase if it is in the
    // collection set and has not had an evacuation failure.
    return _g1h->is_in_cset(hr) && !_evac_failure_regions->contains(hr->hrm_index());
  }

public:
  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h, G1EvacFailureRegions* evac_failure_regions) :
    G1CardTableEntryClosure(),
    _num_dirtied(0),

// ...
  }

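  // Estimate one worker per ThreadsPerWorker claimed Java threads.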
  double worker_cost() const override {
    return (double)_claimer.length() / ThreadsPerWorker;
  }
};

G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
                                                                                 G1EvacInfo* evacuation_info,
                                                                                 G1EvacFailureRegions* evac_failure_regions) :
  G1BatchedTask("Post Evacuate Cleanup 2", G1CollectedHeap::heap()->phase_times())
{
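  // Derived pointers only exist in code produced by C2 or JVMCI compilers, so
  // the derived pointer table update is needed only in those builds.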
#if COMPILER2_OR_JVMCI
  add_serial_task(new UpdateDerivedPointersTask());
#endif
  if (G1CollectedHeap::heap()->has_humongous_reclaim_candidates()) {
    add_serial_task(new EagerlyReclaimHumongousObjectsTask());
  }

  if (evac_failure_regions->has_regions_evac_failed()) {
    add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions));
  }
  add_parallel_task(new RedirtyLoggedCardsTask(evac_failure_regions,
                                               per_thread_states->rdc_buffers(),
                                               per_thread_states->num_workers()));

  if (UseTLAB && ResizeTLAB) {
    add_parallel_task(new ResizeTLABsTask());
  }
  add_parallel_task(new FreeCollectionSetTask(evacuation_info,
                                              per_thread_states->surviving_young_words(),
                                              evac_failure_regions));
}