/*
 * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHPARALLELCLEANING_HPP
#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHPARALLELCLEANING_HPP

#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "utilities/workgroup.hpp"

class ShenandoahStringSymbolTableUnlinkTask : public AbstractGangTask {
private:
  BoolObjectClosure* _is_alive;
  int _initial_string_table_size;
  int _initial_symbol_table_size;

  bool _process_strings;
  int _strings_processed;
  int _strings_removed;

  bool _process_symbols;
  int _symbols_processed;
  int _symbols_removed;

  bool _do_in_parallel;
public:
  ShenandoahStringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
    AbstractGangTask("String/Symbol Unlinking"),
    _is_alive(is_alive),
    _do_in_parallel(Universe::heap()->use_parallel_gc_threads()),
    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
    _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {

    _initial_string_table_size = StringTable::the_table()->table_size();
    _initial_symbol_table_size = SymbolTable::the_table()->table_size();
    if (process_strings) {
      StringTable::clear_parallel_claimed_index();
    }
    if (process_symbols) {
      SymbolTable::clear_parallel_claimed_index();
    }
  }

  ~ShenandoahStringSymbolTableUnlinkTask() {
    guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
              err_msg("claim value " INT32_FORMAT " after unlink less than initial string table size " INT32_FORMAT,
                      StringTable::parallel_claimed_index(), _initial_string_table_size));
    guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
              err_msg("claim value " INT32_FORMAT " after unlink less than initial symbol table size " INT32_FORMAT,
                      SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));

    if (G1TraceStringSymbolTableScrubbing) {
      gclog_or_tty->print_cr("Cleaned string and symbol table, "
                             "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
                             "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
                             strings_processed(),
                             strings_removed(), symbols_processed(), symbols_removed());
    }
  }

  void work(uint worker_id) {
    if (_do_in_parallel) {
      int strings_processed = 0;
      int strings_removed = 0;
      int symbols_processed = 0;
      int symbols_removed = 0;
      if (_process_strings) {
        StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
        Atomic::add(strings_processed, &_strings_processed);
        Atomic::add(strings_removed, &_strings_removed);
      }
      if (_process_symbols) {
        SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
        Atomic::add(symbols_processed, &_symbols_processed);
        Atomic::add(symbols_removed, &_symbols_removed);
      }
    } else {
      if (_process_strings) {
        StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
      }
      if (_process_symbols) {
        SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
      }
    }
  }

  size_t strings_processed() const { return (size_t)_strings_processed; }
  size_t strings_removed() const { return (size_t)_strings_removed; }

  size_t symbols_processed() const { return (size_t)_symbols_processed; }
  size_t symbols_removed() const { return (size_t)_symbols_removed; }
};

class ShenandoahCodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
private:
  static Monitor* _lock;

  BoolObjectClosure* const _is_alive;
  const bool _unloading_occurred;
  const uint _num_workers;

  // Variables used to claim nmethods.
  nmethod* _first_nmethod;
  volatile nmethod* _claimed_nmethod;

  // The list of nmethods that need to be processed by the second pass.
  volatile nmethod* _postponed_list;
  volatile uint _num_entered_barrier;

public:
  ShenandoahCodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
    _is_alive(is_alive),
    _unloading_occurred(unloading_occurred),
    _num_workers(num_workers),
    _first_nmethod(NULL),
    _claimed_nmethod(NULL),
    _postponed_list(NULL),
    _num_entered_barrier(0)
  {
    nmethod::increase_unloading_clock();
    _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
    _claimed_nmethod = (volatile nmethod*)_first_nmethod;
  }

  ~ShenandoahCodeCacheUnloadingTask() {
    CodeCache::verify_clean_inline_caches();

    CodeCache::set_needs_cache_clean(false);
    guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");

    CodeCache::verify_icholder_relocations();
  }

private:
  void add_to_postponed_list(nmethod* nm) {
    nmethod* old;
    do {
      old = (nmethod*)_postponed_list;
      nm->set_unloading_next(old);
    } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
  }

  void clean_nmethod(nmethod* nm) {
    bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);

    if (postponed) {
      // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
      add_to_postponed_list(nm);
    }

    // Mark that this nmethod has been cleaned/unloaded.
    // After this call, it will be safe to ask if this nmethod was unloaded or not.
    nm->set_unloading_clock(nmethod::global_unloading_clock());
  }

  void clean_nmethod_postponed(nmethod* nm) {
    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
  }

  static const int MaxClaimNmethods = 16;

  void claim_nmethods(nmethod** claimed_nmethods, int* num_claimed_nmethods) {
    nmethod* first;
    nmethod* last;

    do {
      *num_claimed_nmethods = 0;

      first = last = (nmethod*)_claimed_nmethod;

      if (first != NULL) {
        for (int i = 0; i < MaxClaimNmethods; i++) {
          last = CodeCache::alive_nmethod(CodeCache::next(last));

          if (last == NULL) {
            break;
          }

          claimed_nmethods[i] = last;
          (*num_claimed_nmethods)++;
        }
      }

    } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
  }

  nmethod* claim_postponed_nmethod() {
    nmethod* claim;
    nmethod* next;

    do {
      claim = (nmethod*)_postponed_list;
      if (claim == NULL) {
        return NULL;
      }

      next = claim->unloading_next();

    } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);

    return claim;
  }

public:
  // Mark that we're done with the first pass of nmethod cleaning.
  void barrier_mark(uint worker_id) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    _num_entered_barrier++;
    if (_num_entered_barrier == _num_workers) {
      ml.notify_all();
    }
  }

  // See if we have to wait for the other workers to
  // finish their first-pass nmethod cleaning work.
  void barrier_wait(uint worker_id) {
    if (_num_entered_barrier < _num_workers) {
      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
      while (_num_entered_barrier < _num_workers) {
        ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
      }
    }
  }

  // Cleaning and unloading of nmethods. Some work has to be postponed
  // to the second pass, when we know which nmethods survive.
  void work_first_pass(uint worker_id) {
    // The first nmethod is claimed by the first worker.
    if (worker_id == 0 && _first_nmethod != NULL) {
      clean_nmethod(_first_nmethod);
      _first_nmethod = NULL;
    }

    int num_claimed_nmethods;
    nmethod* claimed_nmethods[MaxClaimNmethods];

    while (true) {
      claim_nmethods(claimed_nmethods, &num_claimed_nmethods);

      if (num_claimed_nmethods == 0) {
        break;
      }

      for (int i = 0; i < num_claimed_nmethods; i++) {
        clean_nmethod(claimed_nmethods[i]);
      }
    }

    // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
    // Need to retire the buffers now that this thread has stopped cleaning nmethods.
    MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
  }

  void work_second_pass(uint worker_id) {
    nmethod* nm;
    // Take care of postponed nmethods.
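    // Workers drain the shared postponed list concurrently: each nmethod is
    // popped with a CAS in claim_postponed_nmethod(), so every postponed
    // nmethod is processed by exactly one worker.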
    while ((nm = claim_postponed_nmethod()) != NULL) {
      clean_nmethod_postponed(nm);
    }
  }
};

class ShenandoahKlassCleaningTask : public StackObj {
  BoolObjectClosure* _is_alive;
  volatile jint _clean_klass_tree_claimed;
  ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;

public:
  ShenandoahKlassCleaningTask(BoolObjectClosure* is_alive) :
      _is_alive(is_alive),
      _clean_klass_tree_claimed(0),
      _klass_iterator() {
  }

private:
  bool claim_clean_klass_tree_task() {
    if (_clean_klass_tree_claimed) {
      return false;
    }

    return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
  }

  InstanceKlass* claim_next_klass() {
    Klass* klass;
    do {
      klass = _klass_iterator.next_klass();
    } while (klass != NULL && !klass->oop_is_instance());

    return (InstanceKlass*)klass;
  }

public:

  void clean_klass(InstanceKlass* ik) {
    ik->clean_weak_instanceklass_links(_is_alive);

    if (JvmtiExport::has_redefined_a_class()) {
      InstanceKlass::purge_previous_versions(ik);
    }
  }

  void work() {
    ResourceMark rm;

    // One worker will clean the subklass/sibling klass tree.
    if (claim_clean_klass_tree_task()) {
      Klass::clean_subklass_tree(_is_alive);
    }

    // All workers will help clean the classes.
    InstanceKlass* klass;
    while ((klass = claim_next_klass()) != NULL) {
      clean_klass(klass);
    }
  }
};

// To minimize the remark pause times, the tasks below are done in parallel.
class ShenandoahParallelCleaningTask : public AbstractGangTask {
private:
  ShenandoahStringSymbolTableUnlinkTask _string_symbol_task;
  ShenandoahCodeCacheUnloadingTask _code_cache_task;
  ShenandoahKlassCleaningTask _klass_cleaning_task;

public:
  // The constructor is run in the VMThread.
  ShenandoahParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
    AbstractGangTask("Parallel Cleaning"),
    _string_symbol_task(is_alive, process_strings, process_symbols),
    _code_cache_task(num_workers, is_alive, unloading_occurred),
    _klass_cleaning_task(is_alive) {
  }

  void pre_work_verification() {
    // The VM Thread will have registered Metadata during the single-threaded phase of MetadataOnStackMark.
    assert(Thread::current()->is_VM_thread()
           || !MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
  }

  void post_work_verification() {
    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
  }

  // The parallel work done by all worker threads.
  void work(uint worker_id) {
    pre_work_verification();

    // Do first pass of code cache cleaning.
    _code_cache_task.work_first_pass(worker_id);

    // Let the threads mark that the first pass is done.
    _code_cache_task.barrier_mark(worker_id);

    // Clean the Strings and Symbols.
    _string_symbol_task.work(worker_id);

    // Wait for all workers to finish the first code cache cleaning pass.
    _code_cache_task.barrier_wait(worker_id);

    // Do the second code cache cleaning work, which relies on
    // the liveness information gathered during the first pass.
    _code_cache_task.work_second_pass(worker_id);

    // Clean all klasses that were not unloaded.
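    // Every worker calls into the shared klass task: one worker claims the
    // subklass/sibling tree cleanup, and the rest of the klasses are claimed
    // one at a time through the atomic class-loader-data-graph iterator.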
    _klass_cleaning_task.work();

    post_work_verification();
  }

};

#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHPARALLELCLEANING_HPP