 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahBarrierSetNMethod.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#ifdef COMPILER1
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

class ShenandoahBarrierSetC1;
class ShenandoahBarrierSetC2;
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap, MemRegion heap_region) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             ShenandoahNMethodBarrier ? new ShenandoahBarrierSetNMethod(heap) : NULL,
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
  _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
{
  if (heap->mode()->is_generational()) {
    _card_table = new ShenandoahCardTable(heap_region);
    _card_table->initialize();
  }
}
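
// In generational mode, the card table constructed above backs the remembered set
// used to find old->young pointers. A minimal sketch of a post-write card-dirtying
// step, with hypothetical helper naming (the real write paths are emitted by the
// barrier assembler; see also write_ref_array() below):
//
//   void remember_store(ShenandoahCardTable* ct, void* field_addr) {
//     volatile CardTable::CardValue* card = ct->byte_for(field_addr);
//     *card = CardTable::dirty_card_val();  // card will be rescanned for old->young refs
//   }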

ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
}

void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}

bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) {
  if (!ShenandoahLoadRefBarrier) return false;
  // Only needed for references
  return is_reference_type(type);
}
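
// Illustration (hypothetical call sites): the load-reference barrier applies to oop
// loads only; primitive loads never observe forwarded objects. With +ShenandoahLoadRefBarrier:
//   need_load_reference_barrier(IN_HEAP, T_OBJECT) -> true
//   need_load_reference_barrier(IN_HEAP, T_ARRAY)  -> true
//   need_load_reference_barrier(IN_HEAP, T_INT)    -> false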

bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) {
  if (!ShenandoahSATBBarrier) return false;
  // Only needed for references
  if (!is_reference_type(type)) return false;

  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
  return (on_weak_ref || unknown) && keep_alive;
}

void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}

void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}

void ShenandoahBarrierSet::on_thread_attach(Thread* thread) {
  assert(!thread->is_Java_thread() || !thread->on_thread_list(),
         "We should not be here after the thread has started");
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  queue.set_active(_satb_mark_queue_set.is_active());
  if (thread->is_Java_thread()) {
    ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
    ShenandoahThreadLocalData::initialize_gclab(thread);
    ShenandoahThreadLocalData::set_disarmed_value(thread, ShenandoahCodeRoots::disarmed_value());

    if (ShenandoahStackWatermarkBarrier) {
      JavaThread* const jt = JavaThread::cast(thread);
      StackWatermark* const watermark = new ShenandoahStackWatermark(jt);
      StackWatermarkSet::add_watermark(jt, watermark);
    }
  }
}

void ShenandoahBarrierSet::on_thread_detach(Thread* thread) {
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  _satb_mark_queue_set.flush_queue(queue);
  if (thread->is_Java_thread()) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    if (gclab != NULL) {
      gclab->retire();
    }

    PLAB* plab = ShenandoahThreadLocalData::plab(thread);
    // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner
    // without a lock. This is safe only if each PLAB is a whole-number multiple of the card-mark
    // memory size and is aligned with the start of a card's memory range.
    if (plab != NULL) {
      _heap->retire_plab(plab);
    }
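    // Sketch of the alignment invariant the CAUTION above relies on, expressed as asserts
    // (illustrative only; the accessors and card-size constant used here are assumptions):
    //   assert(is_aligned(plab->word_sz() * HeapWordSize, CardTable::card_size()),
    //          "PLAB size must be a whole multiple of the card size");
    //   assert(is_aligned(plab->bottom(), CardTable::card_size()),
    //          "PLAB must start on a card boundary");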

    // The SATB protocol requires keeping alive all oops reachable from roots at the beginning
    // of a marking cycle. A detaching thread's stack may hold the only reference to an object,
    // so process the stack here while marking is still in progress.
    if (ShenandoahStackWatermarkBarrier) {
      if (_heap->is_concurrent_mark_in_progress()) {
        ShenandoahKeepAliveClosure oops;
        StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc);
      } else if (_heap->is_concurrent_weak_root_in_progress() && _heap->is_evacuation_in_progress()) {
        ShenandoahContextEvacuateUpdateRootsClosure oops;
        StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc);
      }
    }
  }
}

void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
  if (_heap->has_forwarded_objects() || (ShenandoahIUBarrier && _heap->is_concurrent_mark_in_progress())) {
    clone_barrier(src);
  }
}
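
// Illustrative call path (simplified; not the actual runtime entry point): before
// Object.clone() copies an object's payload, the source's oop fields must be sane.
//   oop obj = ...;                 // object about to be cloned
//   bs->clone_barrier_runtime(obj);
//   // -> clone_barrier(obj) pre-processes obj's oop fields so the raw copy does not
//   //    propagate references to forwarded objects (and, under +ShenandoahIUBarrier
//   //    during concurrent mark, keeps those referents visible to the marker).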

void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  if (!_heap->mode()->is_generational()) {
    return;
  }

  HeapWord* end = (HeapWord*)((char*) start + (count * heapOopSize));
  // In the case of compressed oops, start and end may potentially be misaligned;
  // so we need to conservatively align the first downward (this is not
  // strictly necessary for current uses, but a case of good hygiene and,
  // if you will, aesthetics) and the second upward (this is essential for
  // current uses) to a HeapWord boundary, so we mark all cards overlapping
  // this write.
  HeapWord* aligned_start = align_down(start, HeapWordSize);
  HeapWord* aligned_end   = align_up(end, HeapWordSize);
  // If compressed oops are not in use, these should already be aligned
  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
         "Expected heap word alignment of start and end");
  _heap->card_scan()->mark_range_as_dirty(aligned_start, (aligned_end - aligned_start));
}
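
// Worked example (illustrative values): with UseCompressedOops, heapOopSize == 4
// while HeapWordSize == 8, so a copy of 3 narrowOops starting at 0x1004 spans
// [0x1004, 0x1010). align_down(0x1004, 8) == 0x1000 and align_up(0x1010, 8) == 0x1010,
// so every card overlapping the written range is marked dirty. Without compressed
// oops, both bounds are already HeapWord-aligned and the adjustment is a no-op.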