< prev index next >

src/hotspot/share/gc/parallel/psCompactionManager.cpp

Print this page

102 
103   _shadow_region_array = new (mtGC) GrowableArray<size_t >(10, mtGC);
104 
105   _shadow_region_monitor = new Monitor(Mutex::nosafepoint, "CompactionManager_lock");
106 }
107 
108 void ParCompactionManager::flush_all_string_dedup_requests() {
109   uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
110   for (uint i=0; i<parallel_gc_threads; i++) {
111     _manager_array[i]->flush_string_dedup_requests();
112   }
113 }
114 
115 ParCompactionManager*
116 ParCompactionManager::gc_thread_compaction_manager(uint index) {
117   assert(index < ParallelGCThreads, "index out of range");
118   assert(_manager_array != nullptr, "Sanity");
119   return _manager_array[index];
120 }
121 
// Begin marking through an object array: mark the array's klass, then
// register the array with the partial-array splitter so that large
// arrays are scanned in chunks, and scan the initial chunk here.
122 void ParCompactionManager::push_objArray(oop obj) {
123   assert(obj->is_objArray(), "precondition");
      // Mark the klass explicitly; element scanning below only visits
      // the array's oop slots, not its header.
124   _mark_and_push_closure.do_klass(obj->klass());
125 
126   objArrayOop obj_array = objArrayOop(obj);
127   size_t array_length = obj_array->length();
      // start() records splitter state for the remainder of the array
      // (presumably making it available to other workers — confirm in
      // PartialArraySplitter) and returns the size of the chunk this
      // thread should scan immediately.
128   size_t initial_chunk_size =
129     _partial_array_splitter.start(&_marking_stack, obj_array, nullptr, array_length);
130   follow_array(obj_array, 0, initial_chunk_size);
131 }
132 
// Scan one chunk of a partially-processed object array.  `state` holds
// the splitter's bookkeeping for the array; `stolen` indicates the task
// was taken from another worker's queue.
133 void ParCompactionManager::process_array_chunk(PartialArrayState* state, bool stolen) {
134   // Access before release by claim(): claim() may release `state`,
      // so the source array must be read from it first.
135   oop obj = state->source();
136   PartialArraySplitter::Claim claim =
137     _partial_array_splitter.claim(state, &_marking_stack, stolen);
      // Scan only the element range this thread claimed.
138   follow_array(objArrayOop(obj), claim._start, claim._end);
139 }
140 
141 void ParCompactionManager::follow_marking_stacks() {
142   ScannerTask task;
143   do {
144     // First, try to move tasks from the overflow stack into the shared buffer, so
145     // that other threads can steal. Otherwise process the overflow stack first.
146     while (marking_stack()->pop_overflow(task)) {
147       if (!marking_stack()->try_push_to_taskqueue(task)) {
148         follow_contents(task, false);
149       }
150     }

102 
103   _shadow_region_array = new (mtGC) GrowableArray<size_t >(10, mtGC);
104 
105   _shadow_region_monitor = new Monitor(Mutex::nosafepoint, "CompactionManager_lock");
106 }
107 
// Have every per-worker compaction manager drain its pending
// string-deduplication requests.
108 void ParCompactionManager::flush_all_string_dedup_requests() {
109   uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
110   for (uint i=0; i<parallel_gc_threads; i++) {
111     _manager_array[i]->flush_string_dedup_requests();
112   }
113 }
114 
// Return the compaction manager dedicated to the GC worker thread with
// the given index.
115 ParCompactionManager*
116 ParCompactionManager::gc_thread_compaction_manager(uint index) {
117   assert(index < ParallelGCThreads, "index out of range");
118   assert(_manager_array != nullptr, "Sanity");
119   return _manager_array[index];
120 }
121 
122 void ParCompactionManager::push_objArray(objArrayOop obj) {
123   assert(obj->is_array_with_oops(), "precondition");
124   _mark_and_push_closure.do_klass(obj->klass());
125 
126   size_t array_length = obj->length();

127   size_t initial_chunk_size =
128     _partial_array_splitter.start(&_marking_stack, obj, nullptr, array_length);
129   follow_array(obj, 0, initial_chunk_size);
130 }
131 
// Scan one chunk of a partially-processed object array.  `state` holds
// the splitter's bookkeeping for the array; `stolen` indicates the task
// was taken from another worker's queue.
132 void ParCompactionManager::process_array_chunk(PartialArrayState* state, bool stolen) {
133   // Access before release by claim(): claim() may release `state`,
      // so the source array must be read from it first.
134   oop obj = state->source();
135   PartialArraySplitter::Claim claim =
136     _partial_array_splitter.claim(state, &_marking_stack, stolen);
      // Scan only the element range this thread claimed.
137   follow_array(objArrayOop(obj), claim._start, claim._end);
138 }
139 
140 void ParCompactionManager::follow_marking_stacks() {
141   ScannerTask task;
142   do {
143     // First, try to move tasks from the overflow stack into the shared buffer, so
144     // that other threads can steal. Otherwise process the overflow stack first.
145     while (marking_stack()->pop_overflow(task)) {
146       if (!marking_stack()->try_push_to_taskqueue(task)) {
147         follow_contents(task, false);
148       }
149     }
< prev index next >