/*
 * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/classListParser.hpp"
#include "cds/classListWriter.hpp"
#include "cds/dynamicArchive.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "cds/lambdaProxyClassDictionary.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/bootstrapInfo.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/arguments.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/spinYield.hpp"

CHeapBitMap* ArchivePtrMarker::_ptrmap = nullptr;
CHeapBitMap* ArchivePtrMarker::_rw_ptrmap = nullptr;
CHeapBitMap* ArchivePtrMarker::_ro_ptrmap = nullptr;
VirtualSpace* ArchivePtrMarker::_vs;

bool ArchivePtrMarker::_compacted;

void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
  assert(_ptrmap == nullptr, "initialize only once");
  assert(_rw_ptrmap == nullptr, "initialize only once");
  assert(_ro_ptrmap == nullptr, "initialize only once");
  _vs = vs;
  _compacted = false;
  _ptrmap = ptrmap;

  // Use this as an initial guesstimate. We should need less space in the
  // archive, but if we're wrong the bitmap will be expanded automatically.
  size_t estimated_archive_size = MetaspaceGC::capacity_until_GC();
  // But set it smaller in debug builds so we always test the expansion code.
  // (Default archive is about 12MB).
  DEBUG_ONLY(estimated_archive_size = 6 * M);

  // We need one bit per pointer in the archive.
  _ptrmap->initialize(estimated_archive_size / sizeof(intptr_t));
}
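
// A worked example of the sizing above (a sketch, assuming a 64-bit VM): for
// an estimated archive size of 12 MB, the initial bitmap covers
// 12M / sizeof(intptr_t) = ~1.5M bits, one bit per pointer-aligned word in the
// buffer. A pointer stored at buffer offset 4096 corresponds to word index
// 4096 / 8 = 512, i.e., bit 512 of the map.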

void ArchivePtrMarker::initialize_rw_ro_maps(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap) {
  address* buff_bottom = (address*)ArchiveBuilder::current()->buffer_bottom();
  address* rw_bottom   = (address*)ArchiveBuilder::current()->rw_region()->base();
  address* ro_bottom   = (address*)ArchiveBuilder::current()->ro_region()->base();

  // The bits in _ptrmap that cover the very first words in the rw/ro regions.
  size_t rw_start = rw_bottom - buff_bottom;
  size_t ro_start = ro_bottom - buff_bottom;

  // The number of bits used by the rw/ro ptrmaps. We might have lots of zero
  // bits at the bottom and top of the rw/ro ptrmaps, but these zeros will be
  // removed by FileMapInfo::write_bitmap_region().
  size_t rw_size = ArchiveBuilder::current()->rw_region()->used() / sizeof(address);
  size_t ro_size = ArchiveBuilder::current()->ro_region()->used() / sizeof(address);

  // The last (exclusive) bit in _ptrmap that covers the rw/ro regions.
  // Note: _ptrmap is dynamically expanded only when an actual pointer is written, so
  // it may not be as large as we want.
  size_t rw_end = MIN2<size_t>(rw_start + rw_size, _ptrmap->size());
  size_t ro_end = MIN2<size_t>(ro_start + ro_size, _ptrmap->size());

  rw_ptrmap->initialize(rw_size);
  ro_ptrmap->initialize(ro_size);

  for (size_t rw_bit = rw_start; rw_bit < rw_end; rw_bit++) {
    rw_ptrmap->at_put(rw_bit - rw_start, _ptrmap->at(rw_bit));
  }

  for (size_t ro_bit = ro_start; ro_bit < ro_end; ro_bit++) {
    ro_ptrmap->at_put(ro_bit - ro_start, _ptrmap->at(ro_bit));
  }

  _rw_ptrmap = rw_ptrmap;
  _ro_ptrmap = ro_ptrmap;
}

void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
  assert(_ptrmap != nullptr, "not initialized");
  assert(!_compacted, "cannot mark anymore");

  if (ptr_base() <= ptr_loc && ptr_loc < ptr_end()) {
    address value = *ptr_loc;
    // We don't want any pointer that points to the very bottom of the archive, otherwise when
    // MetaspaceShared::default_base_address()==0, we can't distinguish between a pointer
    // to nothing (null) vs a pointer to an object that happens to be at the very bottom
    // of the archive.
    assert(value != (address)ptr_base(), "don't point to the bottom of the archive");

    if (value != nullptr) {
      assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
      size_t idx = ptr_loc - ptr_base();
      if (_ptrmap->size() <= idx) {
        _ptrmap->resize((idx + 1) * 2);
      }
      assert(idx < _ptrmap->size(), "must be");
      _ptrmap->set_bit(idx);
      //tty->print_cr("Marking pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ %5zu", p2i(ptr_loc), p2i(*ptr_loc), idx);
    }
  }
}

void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
  assert(_ptrmap != nullptr, "not initialized");
  assert(!_compacted, "cannot clear anymore");

  assert(ptr_base() <= ptr_loc && ptr_loc < ptr_end(), "must be");
  assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
  size_t idx = ptr_loc - ptr_base();
  assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked");
  _ptrmap->clear_bit(idx);
  //tty->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ %5zu", p2i(ptr_loc), p2i(*ptr_loc), idx);
}
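
// A usage sketch for mark_pointer() (hypothetical caller, not part of this
// file): a builder that relocates an embedded pointer while copying an object
// into the buffer would record the slot for later patching, e.g.:
//
//   *(address*)field_loc = buffered_target;              // hypothetical names
//   ArchivePtrMarker::mark_pointer((address*)field_loc);
//
// At run time, every marked slot can then be adjusted if the archive is mapped
// at a base address different from the one requested at dump time.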

class ArchivePtrBitmapCleaner: public BitMapClosure {
  CHeapBitMap* _ptrmap;
  address* _ptr_base;
  address  _relocatable_base;
  address  _relocatable_end;
  size_t   _max_non_null_offset;

public:
  ArchivePtrBitmapCleaner(CHeapBitMap* ptrmap, address* ptr_base, address relocatable_base, address relocatable_end) :
    _ptrmap(ptrmap), _ptr_base(ptr_base),
    _relocatable_base(relocatable_base), _relocatable_end(relocatable_end), _max_non_null_offset(0) {}

  bool do_bit(size_t offset) {
    address* ptr_loc = _ptr_base + offset;
    address ptr_value = *ptr_loc;
    if (ptr_value != nullptr) {
      assert(_relocatable_base <= ptr_value && ptr_value < _relocatable_end, "do not point to arbitrary locations!");
      if (_max_non_null_offset < offset) {
        _max_non_null_offset = offset;
      }
    } else {
      _ptrmap->clear_bit(offset);
      DEBUG_ONLY(log_trace(cds, reloc)("Clearing pointer [" PTR_FORMAT "] -> null @ %9zu", p2i(ptr_loc), offset));
    }

    return true;
  }

  size_t max_non_null_offset() const { return _max_non_null_offset; }
};

void ArchivePtrMarker::compact(address relocatable_base, address relocatable_end) {
  assert(!_compacted, "cannot compact again");
  ArchivePtrBitmapCleaner cleaner(_ptrmap, ptr_base(), relocatable_base, relocatable_end);
  _ptrmap->iterate(&cleaner);
  compact(cleaner.max_non_null_offset());
}

void ArchivePtrMarker::compact(size_t max_non_null_offset) {
  assert(!_compacted, "cannot compact again");
  _ptrmap->resize(max_non_null_offset + 1);
  _compacted = true;
}

char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    ArchiveBuilder::current()->report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  commit_to(newtop);
  _top = newtop;

  if (_max_delta > 0) {
    uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop - 1));
    if (delta > _max_delta) {
      // This is just a sanity check and should not trigger in any real-world usage.
      // It can happen only if you allocate more than 2GB of shared objects, which
      // would require millions of shared classes.
      log_error(cds)("Out of memory in the CDS archive: Please reduce the number of shared classes.");
      MetaspaceShared::unrecoverable_writing_error();
    }
  }

  return _top;
}

void DumpRegion::commit_to(char* newtop) {
  assert(CDSConfig::is_dumping_archive(), "sanity");
  char* base = _rs->base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _vs->committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _vs->reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  if (!_vs->expand_by(commit, false)) {
    log_error(cds)("Failed to expand shared space to %zu bytes",
                   need_committed_size);
    MetaspaceShared::unrecoverable_writing_error();
  }

  const char* which;
  if (_rs->base() == (char*)MetaspaceShared::symbol_rs_base()) {
    which = "symbol";
  } else {
    which = "shared";
  }
  log_debug(cds)("Expanding %s spaces by %7zu bytes [total %9zu bytes ending at %p]",
                 which, commit, _vs->actual_committed_size(), _vs->high());
}

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  // Always align to at least the minimum alignment.
  alignment = MAX2(SharedSpaceObjectAlignment, alignment);
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, (size_t)SharedSpaceObjectAlignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}
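
// A worked example for allocate() above (a sketch, assuming
// SharedSpaceObjectAlignment == 8): allocate(13, 8) rounds _top up to the next
// 8-byte boundary, reserves align_up(13, 8) == 16 bytes, zero-fills them, and
// returns the aligned start. A larger caller-supplied alignment only affects
// where the block starts, not how its size is rounded.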

void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t* p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%s space: %9zu [ %4.1f%% of total] out of %9zu bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(ArchiveBuilder::current()->to_requested(_base)));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed.
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::core_region_alignment());
  _is_packed = true;
  if (next != nullptr) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}

void WriteClosure::do_ptr(void** p) {
  // Write ptr into the archive; ptr can be:
  //   (a) null                 -> written as 0
  //   (b) a "buffered" address -> written as is
  //   (c) a "source" address   -> convert to "buffered" and write
  // The common case is (c). E.g., when writing the vmClasses into the archive.
  // We have (b) only when we don't have a corresponding source object. E.g.,
  // the archived c++ vtable entries.
  address ptr = *(address*)p;
  if (ptr != nullptr && !ArchiveBuilder::current()->is_in_buffer_space(ptr)) {
    ptr = ArchiveBuilder::current()->get_buffered_addr(ptr);
  }
  // null pointers do not need to be converted to offsets
  if (ptr != nullptr) {
    ptr = (address)ArchiveBuilder::current()->buffer_to_offset(ptr);
  }
  _dump_region->append_intptr_t((intptr_t)ptr, false);
}
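
// The reader below is the mirror image of WriteClosure::do_ptr() above: the
// writer stores a non-null pointer as its offset from the buffer bottom (via
// buffer_to_offset()), and the reader reconstructs it by adding _base_address.
// For example (a sketch), a pointer archived as offset 0x1000 is restored as
// _base_address + 0x1000, which stays correct wherever the archive is mapped.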

void ReadClosure::do_ptr(void** p) {
  assert(*p == nullptr, "initializing previously initialized pointer.");
  intptr_t obj = nextPtr();
  assert(obj >= 0, "sanity.");
  *p = (obj != 0) ? (void*)(_base_address + obj) : (void*)obj;
}

void ReadClosure::do_u4(u4* p) {
  intptr_t obj = nextPtr();
  *p = (u4)(uintx(obj));
}

void ReadClosure::do_int(int* p) {
  intptr_t obj = nextPtr();
  *p = (int)(intx(obj));
}

void ReadClosure::do_bool(bool* p) {
  intptr_t obj = nextPtr();
  *p = (bool)(uintx(obj));
}

void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "tag doesn't match (%d, expected %d)", old_tag, tag);
  FileMapInfo::assert_mark(tag == old_tag);
}

void ArchiveUtils::log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) {
  if (ClassListWriter::is_enabled()) {
    if (LambdaProxyClassDictionary::is_supported_invokedynamic(bootstrap_specifier)) {
      const constantPoolHandle& pool = bootstrap_specifier->pool();
      if (SystemDictionaryShared::is_builtin_loader(pool->pool_holder()->class_loader_data())) {
        // Currently lambda proxy classes are supported only for the built-in loaders.
        ResourceMark rm(THREAD);
        int pool_index = bootstrap_specifier->bss_index();
        ClassListWriter w;
        w.stream()->print("%s %s", ClassListParser::lambda_proxy_tag(), pool->pool_holder()->name()->as_C_string());
        CDSIndyInfo cii;
        ClassListParser::populate_cds_indy_info(pool, pool_index, &cii, CHECK);
        GrowableArray<const char*>* indy_items = cii.items();
        for (int i = 0; i < indy_items->length(); i++) {
          w.stream()->print(" %s", indy_items->at(i));
        }
        w.stream()->cr();
      }
    }
  }
}

bool ArchiveUtils::has_aot_initialized_mirror(InstanceKlass* src_ik) {
  if (SystemDictionaryShared::is_excluded_class(src_ik)) {
    assert(!ArchiveBuilder::current()->has_been_buffered(src_ik), "sanity");
    return false;
  }
  return ArchiveBuilder::current()->get_buffered_addr(src_ik)->has_aot_initialized_mirror();
}

size_t HeapRootSegments::size_in_bytes(size_t seg_idx) {
  assert(seg_idx < _count, "In range");
  return objArrayOopDesc::object_size(size_in_elems(seg_idx)) * HeapWordSize;
}

int HeapRootSegments::size_in_elems(size_t seg_idx) {
  assert(seg_idx < _count, "In range");
  if (seg_idx != _count - 1) {
    return _max_size_in_elems;
  } else {
    // Last slice, leftover
    return _roots_count % _max_size_in_elems;
  }
}

size_t HeapRootSegments::segment_offset(size_t seg_idx) {
  assert(seg_idx < _count, "In range");
  return _base_offset + seg_idx * _max_size_in_bytes;
}

ArchiveWorkers::ArchiveWorkers() :
    _end_semaphore(0),
    _num_workers(max_workers()),
    _started_workers(0),
    _finish_tokens(0),
    _state(UNUSED),
    _task(nullptr) {}

ArchiveWorkers::~ArchiveWorkers() {
  assert(Atomic::load(&_state) != WORKING, "Should not be working");
}
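
// A worked example for max_workers() below (a sketch): with
// os::active_processor_count() == 16, log2i_graceful(16) == 4, so at most
// four workers are started; on a single-CPU machine log2i_graceful(1) == 0,
// is_parallel() is false, and run_task() executes the task on the caller alone.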

int ArchiveWorkers::max_workers() {
  // The pool is used for short-lived bursty tasks. We do not want to spend
  // too much time creating and waking up threads unnecessarily. Plus, we do
  // not want to overwhelm large machines. This is why we want to be very
  // conservative about the number of workers actually needed.
  return MAX2(0, log2i_graceful(os::active_processor_count()));
}

bool ArchiveWorkers::is_parallel() {
  return _num_workers > 0;
}

void ArchiveWorkers::start_worker_if_needed() {
  while (true) {
    int cur = Atomic::load(&_started_workers);
    if (cur >= _num_workers) {
      return;
    }
    if (Atomic::cmpxchg(&_started_workers, cur, cur + 1, memory_order_relaxed) == cur) {
      new ArchiveWorkerThread(this);
      return;
    }
  }
}

void ArchiveWorkers::run_task(ArchiveWorkerTask* task) {
  assert(Atomic::load(&_state) == UNUSED, "Should be unused yet");
  assert(Atomic::load(&_task) == nullptr, "Should not have running tasks");
  Atomic::store(&_state, WORKING);

  if (is_parallel()) {
    run_task_multi(task);
  } else {
    run_task_single(task);
  }

  assert(Atomic::load(&_state) == WORKING, "Should be working");
  Atomic::store(&_state, SHUTDOWN);
}

void ArchiveWorkers::run_task_single(ArchiveWorkerTask* task) {
  // Single thread needs no chunking.
  task->configure_max_chunks(1);

  // Execute the task ourselves, as there are no workers.
  task->work(0, 1);
}

void ArchiveWorkers::run_task_multi(ArchiveWorkerTask* task) {
  // Multiple threads can work with multiple chunks.
  task->configure_max_chunks(_num_workers * CHUNKS_PER_WORKER);

  // Set up the run and publish the task. Issue one additional finish token
  // to cover the semaphore shutdown path, see below.
  Atomic::store(&_finish_tokens, _num_workers + 1);
  Atomic::release_store(&_task, task);

  // Kick off pool startup by starting a single worker, and proceed
  // immediately to executing the task locally.
  start_worker_if_needed();

  // Execute the task ourselves, while workers are catching up.
  // This allows us to hide parts of task handoff latency.
  task->run();

  // Done executing task locally, wait for any remaining workers to complete.
  // Once all workers report, we can proceed to termination. To do this safely,
  // we need to make sure every worker has left. A spin-wait alone would suffice,
  // but we do not want to burn cycles on it. A semaphore alone would not be safe,
  // since workers can still be inside it as we proceed from wait here. So we block
  // on semaphore first, and then spin-wait for all workers to terminate.
  _end_semaphore.wait();
  SpinYield spin;
  while (Atomic::load(&_finish_tokens) != 0) {
    spin.wait();
  }

  OrderAccess::fence();

  assert(Atomic::load(&_finish_tokens) == 0, "All tokens are consumed");
}
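
// Token accounting for the shutdown protocol above (a sketch, with N ==
// _num_workers): _finish_tokens starts at N + 1. Each of the N workers
// consumes one token on exit; the last one also signals _end_semaphore and
// consumes the extra token. The coordinator therefore wakes from the semaphore
// only after the final signal, then spins until the count reaches zero, which
// guarantees no worker is still touching the pool when it is torn down.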

void ArchiveWorkers::run_as_worker() {
  assert(is_parallel(), "Should be in parallel mode");

  ArchiveWorkerTask* task = Atomic::load_acquire(&_task);
  task->run();

  // All work done in threads should be visible to caller.
  OrderAccess::fence();

  // Signal the pool the work is complete, and we are exiting.
  // Worker cannot do anything else with the pool after this.
  if (Atomic::sub(&_finish_tokens, 1, memory_order_relaxed) == 1) {
    // Last worker leaving. Notify the pool it can unblock to spin-wait.
    // Then consume the last token and leave.
    _end_semaphore.signal();
    int last = Atomic::sub(&_finish_tokens, 1, memory_order_relaxed);
    assert(last == 0, "Should be");
  }
}

void ArchiveWorkerTask::run() {
  while (true) {
    int chunk = Atomic::load(&_chunk);
    if (chunk >= _max_chunks) {
      return;
    }
    if (Atomic::cmpxchg(&_chunk, chunk, chunk + 1, memory_order_relaxed) == chunk) {
      assert(0 <= chunk && chunk < _max_chunks, "Sanity");
      work(chunk, _max_chunks);
    }
  }
}

void ArchiveWorkerTask::configure_max_chunks(int max_chunks) {
  if (_max_chunks == 0) {
    _max_chunks = max_chunks;
  }
}

ArchiveWorkerThread::ArchiveWorkerThread(ArchiveWorkers* pool) : NamedThread(), _pool(pool) {
  set_name("ArchiveWorkerThread");
  if (os::create_thread(this, os::os_thread)) {
    os::start_thread(this);
  } else {
    vm_exit_during_initialization("Unable to create archive worker",
                                  os::native_thread_creation_failed_msg());
  }
}

void ArchiveWorkerThread::run() {
  // Avalanche startup: each worker starts two others.
  _pool->start_worker_if_needed();
  _pool->start_worker_if_needed();

  // Set ourselves up.
  os::set_priority(this, NearMaxPriority);

  // Work.
  _pool->run_as_worker();
}

void ArchiveWorkerThread::post_run() {
  this->NamedThread::post_run();
  delete this;
}
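
// An end-to-end usage sketch for the pool (hypothetical task type; assumes
// ArchiveWorkerTask subclasses are constructed with a name and implement
// work(chunk, max_chunks) over disjoint chunks):
//
//   class HypotheticalScanTask : public ArchiveWorkerTask {
//   public:
//     HypotheticalScanTask() : ArchiveWorkerTask("Hypothetical Scan") {}
//     void work(int chunk, int max_chunks) override {
//       // Process slice 'chunk' out of 'max_chunks' of the shared data here.
//     }
//   };
//
//   ArchiveWorkers workers;
//   HypotheticalScanTask task;
//   workers.run_task(&task);  // degrades to the caller thread when not parallel
//
// Note that a pool object is single-use: run_task() asserts the UNUSED state
// and moves the pool to SHUTDOWN when it returns.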