/*
 * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/classListParser.hpp"
#include "cds/classListWriter.hpp"
#include "cds/dynamicArchive.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "cds/lambdaProxyClassDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/bootstrapInfo.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/arguments.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/spinYield.hpp"

CHeapBitMap* ArchivePtrMarker::_ptrmap = nullptr;
CHeapBitMap* ArchivePtrMarker::_rw_ptrmap = nullptr;
CHeapBitMap* ArchivePtrMarker::_ro_ptrmap = nullptr;
VirtualSpace* ArchivePtrMarker::_vs;

bool ArchivePtrMarker::_compacted;
void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
  assert(_ptrmap == nullptr, "initialize only once");
  assert(_rw_ptrmap == nullptr, "initialize only once");
  assert(_ro_ptrmap == nullptr, "initialize only once");
  _vs = vs;
  _compacted = false;
  _ptrmap = ptrmap;

  // Use this as an initial guesstimate. We should need less space in the
  // archive, but if we're wrong the bitmap will be expanded automatically.
  size_t estimated_archive_size = MetaspaceGC::capacity_until_GC();
  // But set it smaller in debug builds so we always test the expansion code.
  // (The default archive is about 12MB.)
  DEBUG_ONLY(estimated_archive_size = 6 * M);

  // We need one bit per pointer in the archive.
  _ptrmap->initialize(estimated_archive_size / sizeof(intptr_t));
}
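
// For illustration: in a debug build the initial map covers
// 6 * M / sizeof(intptr_t) = 786432 slots on a 64-bit VM (one bit per
// pointer-sized word), i.e. roughly 96 KB of bitmap.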

void ArchivePtrMarker::initialize_rw_ro_maps(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap) {
  address* buff_bottom = (address*)ArchiveBuilder::current()->buffer_bottom();
  address* rw_bottom   = (address*)ArchiveBuilder::current()->rw_region()->base();
  address* ro_bottom   = (address*)ArchiveBuilder::current()->ro_region()->base();

  // The bits in _ptrmap that cover the very first word of the rw and ro regions.
  size_t rw_start = rw_bottom - buff_bottom;
  size_t ro_start = ro_bottom - buff_bottom;

  // The number of bits used by the rw/ro ptrmaps. We might have lots of zero
  // bits at the bottom and top of the rw/ro ptrmaps, but these zeros will be
  // removed by FileMapInfo::write_bitmap_region().
  size_t rw_size = ArchiveBuilder::current()->rw_region()->used() / sizeof(address);
  size_t ro_size = ArchiveBuilder::current()->ro_region()->used() / sizeof(address);

  // The last (exclusive) bit in _ptrmap that covers the rw/ro regions.
  // Note: _ptrmap is dynamically expanded only when an actual pointer is written, so
  // it may not be as large as we want.
  size_t rw_end = MIN2<size_t>(rw_start + rw_size, _ptrmap->size());
  size_t ro_end = MIN2<size_t>(ro_start + ro_size, _ptrmap->size());

  rw_ptrmap->initialize(rw_size);
  ro_ptrmap->initialize(ro_size);

  for (size_t rw_bit = rw_start; rw_bit < rw_end; rw_bit++) {
    rw_ptrmap->at_put(rw_bit - rw_start, _ptrmap->at(rw_bit));
  }

  for (size_t ro_bit = ro_start; ro_bit < ro_end; ro_bit++) {
    ro_ptrmap->at_put(ro_bit - ro_start, _ptrmap->at(ro_bit));
  }

  _rw_ptrmap = rw_ptrmap;
  _ro_ptrmap = ro_ptrmap;
}
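
// Illustrative buffer layout (not to scale), assuming the rw region precedes
// the ro region in the output buffer:
//
//   buff_bottom        rw_bottom                 ro_bottom
//   |                  |<------- rw_size ------->|<------- ro_size ------->|
//   _ptrmap bit index: [rw_start, rw_end)        [ro_start, ro_end)
//
// where rw_start = rw_bottom - buff_bottom, counted in pointer-sized words.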

void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
  assert(_ptrmap != nullptr, "not initialized");
  assert(!_compacted, "cannot mark anymore");

  if (ptr_base() <= ptr_loc && ptr_loc < ptr_end()) {
    address value = *ptr_loc;
    // We don't want any pointer that points to the very bottom of the archive; otherwise,
    // when AOTMetaspace::default_base_address() == 0, we can't distinguish between a
    // pointer to nothing (null) and a pointer to an object that happens to be at the
    // very bottom of the archive.
    assert(value != (address)ptr_base(), "don't point to the bottom of the archive");

    if (value != nullptr) {
      assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored at aligned addresses");
      size_t idx = ptr_loc - ptr_base();
      if (_ptrmap->size() <= idx) {
        _ptrmap->resize((idx + 1) * 2);
      }
      assert(idx < _ptrmap->size(), "must be");
      _ptrmap->set_bit(idx);
      //tty->print_cr("Marking pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ %5zu", p2i(ptr_loc), p2i(*ptr_loc), idx);
    }
  }
}
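
// For illustration: on a 64-bit VM, a pointer slot at byte offset 4096 from
// the buffer bottom gets bit index 4096 / sizeof(address) = 512. When the map
// is too small, it is grown to 2 * (idx + 1) bits, amortizing future resizes.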

void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
  assert(_ptrmap != nullptr, "not initialized");
  assert(!_compacted, "cannot clear anymore");

  assert(ptr_base() <= ptr_loc && ptr_loc < ptr_end(), "must be");
  assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored at aligned addresses");
  size_t idx = ptr_loc - ptr_base();
  assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked");
  _ptrmap->clear_bit(idx);
  //tty->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ %5zu", p2i(ptr_loc), p2i(*ptr_loc), idx);
}

class ArchivePtrBitmapCleaner: public BitMapClosure {
  CHeapBitMap* _ptrmap;
  address* _ptr_base;
  address  _relocatable_base;
  address  _relocatable_end;
  size_t   _max_non_null_offset;

public:
  ArchivePtrBitmapCleaner(CHeapBitMap* ptrmap, address* ptr_base, address relocatable_base, address relocatable_end) :
    _ptrmap(ptrmap), _ptr_base(ptr_base),
    _relocatable_base(relocatable_base), _relocatable_end(relocatable_end), _max_non_null_offset(0) {}

  bool do_bit(size_t offset) {
    address* ptr_loc = _ptr_base + offset;
    address ptr_value = *ptr_loc;
    if (ptr_value != nullptr) {
      assert(_relocatable_base <= ptr_value && ptr_value < _relocatable_end, "do not point to arbitrary locations!");
      if (_max_non_null_offset < offset) {
        _max_non_null_offset = offset;
      }
    } else {
      _ptrmap->clear_bit(offset);
      DEBUG_ONLY(log_trace(aot, reloc)("Clearing pointer [" PTR_FORMAT "] -> null @ %9zu", p2i(ptr_loc), offset));
    }

    return true;
  }

  size_t max_non_null_offset() const { return _max_non_null_offset; }
};

void ArchivePtrMarker::compact(address relocatable_base, address relocatable_end) {
  assert(!_compacted, "cannot compact again");
  ArchivePtrBitmapCleaner cleaner(_ptrmap, ptr_base(), relocatable_base, relocatable_end);
  _ptrmap->iterate(&cleaner);
  compact(cleaner.max_non_null_offset());
}

void ArchivePtrMarker::compact(size_t max_non_null_offset) {
  assert(!_compacted, "cannot compact again");
  _ptrmap->resize(max_non_null_offset + 1);
  _compacted = true;
}
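
// After compaction the map ends at the last marked slot; the all-zero tail
// therefore contributes nothing to the bitmap region that
// FileMapInfo::write_bitmap_region() eventually writes out.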

char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    ArchiveBuilder::current()->report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  commit_to(newtop);
  _top = newtop;

  if (_max_delta > 0) {
    uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop - 1));
    if (delta > _max_delta) {
      // This is just a sanity check and should not happen in any real-world usage:
      // it would require allocating more than 2GB of shared objects, i.e., millions
      // of shared classes.
      aot_log_error(aot)("Out of memory in the CDS archive: Please reduce the number of shared classes.");
      AOTMetaspace::unrecoverable_writing_error();
    }
  }

  return _top;
}

void DumpRegion::commit_to(char* newtop) {
  assert(CDSConfig::is_dumping_archive(), "sanity");
  char* base = _rs->base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _vs->committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _vs->reserved_size() - has_committed_size;

  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  if (!_vs->expand_by(commit, false)) {
    aot_log_error(aot)("Failed to expand shared space to %zu bytes",
                       need_committed_size);
    AOTMetaspace::unrecoverable_writing_error();
  }

  const char* which;
  if (_rs->base() == (char*)AOTMetaspace::symbol_rs_base()) {
    which = "symbol";
  } else {
    which = "shared";
  }
  log_debug(aot)("Expanding %s spaces by %7zu bytes [total %9zu bytes ending at %p]",
                 which, commit, _vs->actual_committed_size(), _vs->high());
}
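
// For illustration: if the allocation watermark moves 100 KB past the committed
// boundary, we commit MAX2(100 KB, 1 MB) = 1 MB (clamped to what is still
// reserved), so steady allocation commits memory in 1 MB steps rather than a
// page at a time.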

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  // Always align to at least minimum alignment
  alignment = MAX2(SharedSpaceObjectAlignment, alignment);
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, (size_t)SharedSpaceObjectAlignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}
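
// For illustration, assuming SharedSpaceObjectAlignment is 8: allocate(13, 16)
// aligns _top up to 16 bytes, advances it by align_up(13, 8) = 16 bytes, and
// returns the zero-filled block.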

void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t* p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

void DumpRegion::print(size_t total_bytes) const {
  char* base = used() > 0 ? ArchiveBuilder::current()->to_requested(_base) : nullptr;
  log_debug(aot)("%s space: %9zu [ %4.1f%% of total] out of %9zu bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(base));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  aot_log_error(aot)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                     _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    aot_log_error(aot)(" required = %d", int(needed_bytes));
  }
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed.
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

void DumpRegion::pack(DumpRegion* next) {
  if (!is_packed()) {
    _end = (char*)align_up(_top, AOTMetaspace::core_region_alignment());
    _is_packed = true;
  }
  if (next != nullptr) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}

void WriteClosure::do_ptr(void** p) {
  // Write ptr into the archive; ptr can be:
  //   (a) null                 -> written as 0
  //   (b) a "buffered" address -> written as is
  //   (c) a "source" address   -> converted to "buffered" and written
  // The common case is (c), e.g., when writing the vmClasses into the archive.
  // We have (b) only when there is no corresponding source object, e.g., for
  // the archived C++ vtable entries.
  address ptr = *(address*)p;
  if (ptr != nullptr && !ArchiveBuilder::current()->is_in_buffer_space(ptr)) {
    ptr = ArchiveBuilder::current()->get_buffered_addr(ptr);
  }
  // null pointers do not need to be converted to offsets
  if (ptr != nullptr) {
    ptr = (address)ArchiveBuilder::current()->buffer_to_offset(ptr);
  }
  _dump_region->append_intptr_t((intptr_t)ptr, false);
}

void ReadClosure::do_ptr(void** p) {
  assert(*p == nullptr, "initializing a previously initialized pointer.");
  intptr_t obj = nextPtr();
  assert(obj >= 0, "sanity.");
  *p = (obj != 0) ? (void*)(_base_address + obj) : (void*)obj;
}
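
// Note the round trip: WriteClosure::do_ptr above stores every non-null pointer
// as an offset from the buffer bottom, and ReadClosure::do_ptr rebuilds it by
// adding _base_address, which keeps the stream position-independent.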

void ReadClosure::do_u4(u4* p) {
  intptr_t obj = nextPtr();
  *p = (u4)(uintx(obj));
}

void ReadClosure::do_int(int* p) {
  intptr_t obj = nextPtr();
  *p = (int)(intx(obj));
}

void ReadClosure::do_bool(bool* p) {
  intptr_t obj = nextPtr();
  *p = (bool)(uintx(obj));
}

void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "tag doesn't match (%d, expected %d)", old_tag, tag);
  FileMapInfo::assert_mark(tag == old_tag);
}

void ArchiveUtils::log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) {
  if (ClassListWriter::is_enabled()) {
    if (LambdaProxyClassDictionary::is_supported_invokedynamic(bootstrap_specifier)) {
      const constantPoolHandle& pool = bootstrap_specifier->pool();
      if (SystemDictionaryShared::is_builtin_loader(pool->pool_holder()->class_loader_data())) {
        // Currently lambda proxy classes are supported only for the built-in loaders.
        ResourceMark rm(THREAD);
        int pool_index = bootstrap_specifier->bss_index();
        ClassListWriter w;
        w.stream()->print("%s %s", ClassListParser::lambda_proxy_tag(), pool->pool_holder()->name()->as_C_string());
        CDSIndyInfo cii;
        ClassListParser::populate_cds_indy_info(pool, pool_index, &cii, CHECK);
        GrowableArray<const char*>* indy_items = cii.items();
        for (int i = 0; i < indy_items->length(); i++) {
          w.stream()->print(" %s", indy_items->at(i));
        }
        w.stream()->cr();
      }
    }
  }
}

bool ArchiveUtils::has_aot_initialized_mirror(InstanceKlass* src_ik) {
  if (!ArchiveBuilder::current()->has_been_archived(src_ik)) {
    return false;
  }
  return ArchiveBuilder::current()->get_buffered_addr(src_ik)->has_aot_initialized_mirror();
}

size_t HeapRootSegments::size_in_bytes(size_t seg_idx) {
  assert(seg_idx < _count, "In range");
  return objArrayOopDesc::object_size(size_in_elems(seg_idx)) * HeapWordSize;
}

int HeapRootSegments::size_in_elems(size_t seg_idx) {
  assert(seg_idx < _count, "In range");
  if (seg_idx != _count - 1) {
    return _max_size_in_elems;
  } else {
    // Last slice, leftover
    return _roots_count % _max_size_in_elems;
  }
}

size_t HeapRootSegments::segment_offset(size_t seg_idx) {
  assert(seg_idx < _count, "In range");
  return _base_offset + seg_idx * _max_size_in_bytes;
}

ArchiveWorkers::ArchiveWorkers() :
  _end_semaphore(0),
  _num_workers(max_workers()),
  _started_workers(0),
  _finish_tokens(0),
  _state(UNUSED),
  _task(nullptr) {}

ArchiveWorkers::~ArchiveWorkers() {
  assert(AtomicAccess::load(&_state) != WORKING, "Should not be working");
}

int ArchiveWorkers::max_workers() {
  // The pool is used for short-lived bursty tasks. We do not want to spend
  // too much time creating and waking up threads unnecessarily. Plus, we do
  // not want to overwhelm large machines. This is why we want to be very
  // conservative about the number of workers actually needed.
  return MAX2(0, log2i_graceful(os::active_processor_count()));
}
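
// For illustration: log2i_graceful yields 0 workers on a single-CPU machine
// (which falls back to the serial path below), 3 workers for 8 CPUs, and 6
// workers for 64 CPUs.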

bool ArchiveWorkers::is_parallel() {
  return _num_workers > 0;
}

void ArchiveWorkers::start_worker_if_needed() {
  while (true) {
    int cur = AtomicAccess::load(&_started_workers);
    if (cur >= _num_workers) {
      return;
    }
    if (AtomicAccess::cmpxchg(&_started_workers, cur, cur + 1, memory_order_relaxed) == cur) {
      new ArchiveWorkerThread(this);
      return;
    }
  }
}

void ArchiveWorkers::run_task(ArchiveWorkerTask* task) {
  assert(AtomicAccess::load(&_state) == UNUSED, "Should be unused yet");
  assert(AtomicAccess::load(&_task) == nullptr, "Should not have running tasks");
  AtomicAccess::store(&_state, WORKING);

  if (is_parallel()) {
    run_task_multi(task);
  } else {
    run_task_single(task);
  }

  assert(AtomicAccess::load(&_state) == WORKING, "Should be working");
  AtomicAccess::store(&_state, SHUTDOWN);
}

void ArchiveWorkers::run_task_single(ArchiveWorkerTask* task) {
  // A single thread needs no chunking.
  task->configure_max_chunks(1);

  // Execute the task ourselves, as there are no workers.
  task->work(0, 1);
}

void ArchiveWorkers::run_task_multi(ArchiveWorkerTask* task) {
  // Multiple threads can work with multiple chunks.
  task->configure_max_chunks(_num_workers * CHUNKS_PER_WORKER);

  // Set up the run and publish the task. Issue one additional finish token
  // to cover the semaphore shutdown path, see below.
  AtomicAccess::store(&_finish_tokens, _num_workers + 1);
  AtomicAccess::release_store(&_task, task);

  // Kick off pool startup by starting a single worker, and proceed
  // immediately to executing the task locally.
  start_worker_if_needed();

  // Execute the task ourselves, while workers are catching up.
  // This allows us to hide parts of the task handoff latency.
  task->run();

  // Done executing the task locally; wait for any remaining workers to complete.
  // Once all workers report, we can proceed to termination. To do this safely,
  // we need to make sure every worker has left. A spin-wait alone would suffice,
  // but we do not want to burn cycles on it. A semaphore alone would not be safe,
  // since workers can still be inside it as we return from the wait. So we block
  // on the semaphore first, and then spin-wait for all workers to terminate.
  _end_semaphore.wait();
  SpinYield spin;
  while (AtomicAccess::load(&_finish_tokens) != 0) {
    spin.wait();
  }

  OrderAccess::fence();

  assert(AtomicAccess::load(&_finish_tokens) == 0, "All tokens are consumed");
}

void ArchiveWorkers::run_as_worker() {
  assert(is_parallel(), "Should be in parallel mode");

  ArchiveWorkerTask* task = AtomicAccess::load_acquire(&_task);
  task->run();

  // All work done in threads should be visible to the caller.
  OrderAccess::fence();

  // Signal the pool the work is complete, and we are exiting.
  // The worker cannot do anything else with the pool after this.
  if (AtomicAccess::sub(&_finish_tokens, 1, memory_order_relaxed) == 1) {
    // Last worker leaving. Notify the pool it can unblock to spin-wait.
    // Then consume the last token and leave.
    _end_semaphore.signal();
    int last = AtomicAccess::sub(&_finish_tokens, 1, memory_order_relaxed);
    assert(last == 0, "Should be");
  }
}
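
// Token protocol, traced for _num_workers == 3 (illustrative): run_task_multi
// issues 3 + 1 = 4 tokens. The three workers decrement 4 -> 3 -> 2 -> 1; the
// worker that sees the count reach 1 is the last one out, so it signals
// _end_semaphore and consumes the final token (1 -> 0). The coordinator wakes
// from the semaphore and then spin-waits for 0, which guarantees every worker
// has fully left the pool before it is torn down.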

void ArchiveWorkerTask::run() {
  while (true) {
    int chunk = AtomicAccess::load(&_chunk);
    if (chunk >= _max_chunks) {
      return;
    }
    if (AtomicAccess::cmpxchg(&_chunk, chunk, chunk + 1, memory_order_relaxed) == chunk) {
      assert(0 <= chunk && chunk < _max_chunks, "Sanity");
      work(chunk, _max_chunks);
    }
  }
}
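
// Chunks are claimed by a CAS race: every participating thread (the coordinator
// included) repeatedly bids for the next chunk index, so each chunk in
// [0, _max_chunks) is processed exactly once, by whichever thread wins it.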

void ArchiveWorkerTask::configure_max_chunks(int max_chunks) {
  if (_max_chunks == 0) {
    _max_chunks = max_chunks;
  }
}

ArchiveWorkerThread::ArchiveWorkerThread(ArchiveWorkers* pool) : NamedThread(), _pool(pool) {
  set_name("ArchiveWorkerThread");
  if (os::create_thread(this, os::os_thread)) {
    os::start_thread(this);
  } else {
    vm_exit_during_initialization("Unable to create archive worker",
                                  os::native_thread_creation_failed_msg());
  }
}

void ArchiveWorkerThread::run() {
  // Avalanche startup: each worker starts two others.
  _pool->start_worker_if_needed();
  _pool->start_worker_if_needed();

  // Set ourselves up.
  os::set_priority(this, NearMaxPriority);

  // Work.
  _pool->run_as_worker();
}
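
// With each worker starting two more, the pool reaches _num_workers threads in
// roughly log2(_num_workers) startup steps instead of one serial creation loop.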

void ArchiveWorkerThread::post_run() {
  this->NamedThread::post_run();
  delete this;
}