1 /* 2 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates. 4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6 * 7 * This code is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 only, as 9 * published by the Free Software Foundation. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "logging/log.hpp"

// Returns the ReferenceType (Soft/Weak/Final/Phantom) recorded in the klass of
// the given java.lang.ref.Reference instance.
static ReferenceType reference_type(oop reference) {
  return InstanceKlass::cast(reference->klass())->reference_type();
}

// Human-readable name for a ReferenceType, used only in logging.
static const char* reference_type_name(ReferenceType type) {
  switch (type) {
    case REF_SOFT:
      return "Soft";

    case REF_WEAK:
      return "Weak";

    case REF_FINAL:
      return "Final";

    case REF_PHANTOM:
      return "Phantom";

    default:
      ShouldNotReachHere();
      return nullptr;
  }
}

// Dirty the remembered-set card covering 'field' when a store created an
// old-to-young pointer. Only called when ShenandoahCardBarrier is enabled.
template <typename T>
static void card_mark_barrier(T* field, oop value) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->is_in_or_null(value), "Should be in heap");
  assert(ShenandoahCardBarrier, "Card-mark barrier should be on");
  if (heap->is_in_old(field) && heap->is_in_young(value)) {
    // For Shenandoah, each generation collects all the _referents_ that belong to the
    // collected generation. We can end up with discovered lists that contain a mixture
    // of old and young _references_. These references are linked together through the
    // discovered field in java.lang.Reference. In some cases, creating or editing this
    // list may result in the creation of _new_ old-to-young pointers which must dirty
    // the corresponding card. Failing to do this may cause heap verification errors and
    // lead to incorrect GC behavior.
    heap->card_scan()->mark_card_as_dirty(reinterpret_cast<HeapWord*>(field));
  }
}

// Store an oop into a (possibly compressed) heap field, applying the card-mark
// barrier when enabled. Specialized below for oop and narrowOop.
template <typename T>
static void set_oop_field(T* field, oop value);

template <>
void set_oop_field<oop>(oop* field, oop value) {
  *field = value;
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

template <>
void set_oop_field<narrowOop>(narrowOop* field, oop value) {
  *field = CompressedOops::encode(value);
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

// Resolve 'obj' through the load-reference barrier if it is marked, so that a
// to-space copy is returned instead of a from-space original. Unmarked or null
// objects are returned unchanged.
static oop lrb(oop obj) {
  if (obj != nullptr && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
    return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
  } else {
    return obj;
  }
}

// Raw address of the 'referent' field of a java.lang.ref.Reference.
// Declared volatile because the referent is read with Atomic::load below.
template <typename T>
static volatile T* reference_referent_addr(oop reference) {
  return (volatile T*)java_lang_ref_Reference::referent_addr_raw(reference);
}

// Atomically load and decode the referent of a Reference. The atomic load
// guards against torn/stale reads racing with a concurrent clear.
template <typename T>
static oop reference_referent(oop reference) {
  T heap_oop = Atomic::load(reference_referent_addr<T>(reference));
  return CompressedOops::decode(heap_oop);
}

// Null out the referent field without any barrier (raw access).
static void reference_clear_referent(oop reference) {
  java_lang_ref_Reference::clear_referent_raw(reference);
}

// Raw address of the 'discovered' field, which links References together on
// the per-worker discovered lists and on the pending list.
template <typename T>
static T* reference_discovered_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
}

// Load the 'discovered' link, resolved through the load-reference barrier.
template <typename T>
static oop reference_discovered(oop reference) {
  T heap_oop = *reference_discovered_addr<T>(reference);
  return lrb(CompressedOops::decode(heap_oop));
}

// Plain (non-atomic) store to the 'discovered' field. Safe only when no other
// thread can be racing on this Reference; racing discovery uses the CAS below.
template <typename T>
static void reference_set_discovered(oop reference, oop discovered);

template <>
void reference_set_discovered<oop>(oop reference, oop discovered) {
  *reference_discovered_addr<oop>(reference) = discovered;
}

template <>
void reference_set_discovered<narrowOop>(oop reference, oop discovered) {
  *reference_discovered_addr<narrowOop>(reference) = CompressedOops::encode(discovered);
}

// CAS the 'discovered' field from null to 'discovered'. Returns true on
// success, i.e. this thread won the race to discover the Reference.
template<typename T>
static bool reference_cas_discovered(oop reference, oop discovered) {
  T* addr = reinterpret_cast<T *>(java_lang_ref_Reference::discovered_addr_raw(reference));
  return ShenandoahHeap::atomic_update_oop_check(discovered, addr, nullptr);
}

// Raw address of the 'next' field of a java.lang.ref.Reference.
template <typename T>
static T* reference_next_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::next_addr_raw(reference));
}

// Load the 'next' link, resolved through the load-reference barrier.
// A non-null 'next' marks a FinalReference as inactive (see is_inactive).
template <typename T>
static oop reference_next(oop reference) {
  T heap_oop = RawAccess<>::oop_load(reference_next_addr<T>(reference));
  return lrb(CompressedOops::decode(heap_oop));
}

// Raw store to the 'next' field (used to self-loop FinalReferences).
static void reference_set_next(oop reference, oop next) {
  java_lang_ref_Reference::set_next_raw(reference, next);
}

// Advance the SoftReference clock to the current time in milliseconds; the
// clock feeds the soft-reference clearing policy.
static void soft_reference_update_clock() {
  const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  java_lang_ref_SoftReference::set_clock(now);
}

ShenandoahRefProcThreadLocal::ShenandoahRefProcThreadLocal() :
  _discovered_list(nullptr),
  _encountered_count(),
  _discovered_count(),
  _enqueued_count() {
}

// Reset the per-worker discovered list, mark closure, and all statistics
// counters, in preparation for a new marking cycle.
void ShenandoahRefProcThreadLocal::reset() {
  _discovered_list = nullptr;
  _mark_closure = nullptr;
  for (uint i = 0; i < reference_type_count; i++) {
    _encountered_count[i] = 0;
    _discovered_count[i] = 0;
    _enqueued_count[i] = 0;
  }
}

// Address of this worker's discovered-list head, reinterpreted as the
// requested (compressed or uncompressed) oop representation.
template <typename T>
T* ShenandoahRefProcThreadLocal::discovered_list_addr() {
  return reinterpret_cast<T*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<oop>() const {
  return *reinterpret_cast<const oop*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<narrowOop>() const {
  return CompressedOops::decode(*reinterpret_cast<const narrowOop*>(&_discovered_list));
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<narrowOop>(oop head) {
  *discovered_list_addr<narrowOop>() = CompressedOops::encode(head);
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<oop>(oop head) {
  *discovered_list_addr<oop>() = head;
}

ShenandoahReferenceProcessor::ShenandoahReferenceProcessor(uint max_workers) :
  _soft_reference_policy(nullptr),
  _ref_proc_thread_locals(NEW_C_HEAP_ARRAY(ShenandoahRefProcThreadLocal, max_workers, mtGC)),
  _pending_list(nullptr),
  _pending_list_tail(&_pending_list),
  _iterate_discovered_list_id(0U),
  _stats() {
  for (size_t i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

// Reset every worker's thread-local reference-processing state.
void ShenandoahReferenceProcessor::reset_thread_locals() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

// Install the marking closure used by 'discover' to keep FinalReference
// referents alive for the given worker.
void ShenandoahReferenceProcessor::set_mark_closure(uint worker_id, ShenandoahMarkRefsSuperClosure* mark_closure) {
  _ref_proc_thread_locals[worker_id].set_mark_closure(mark_closure);
}

// Select the soft-reference clearing policy for this cycle: clear-all (e.g.
// under memory pressure) or LRU based on heap occupancy.
void ShenandoahReferenceProcessor::set_soft_reference_policy(bool clear) {
  static AlwaysClearPolicy always_clear_policy;
  static LRUMaxHeapPolicy lru_max_heap_policy;

  if (clear) {
    log_info(gc, ref)("Clearing All SoftReferences");
    _soft_reference_policy = &always_clear_policy;
  } else {
    _soft_reference_policy = &lru_max_heap_policy;
  }

  _soft_reference_policy->setup();
}

// An inactive Reference must not be discovered; see the per-branch comments
// for what "inactive" means for Final vs. non-Final references.
template <typename T>
bool ShenandoahReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
  if (type == REF_FINAL) {
    // A FinalReference is inactive if its next field is non-null. An application can't
    // call enqueue() or clear() on a FinalReference.
    return reference_next<T>(reference) != nullptr;
  } else {
    // A non-FinalReference is inactive if the referent is null. The referent can only
    // be null if the application called Reference.enqueue() or Reference.clear().
    return referent == nullptr;
  }
}

// A strongly-marked referent keeps its Reference from being discovered.
bool ShenandoahReferenceProcessor::is_strongly_live(oop referent) const {
  return ShenandoahHeap::heap()->marking_context()->is_marked_strong(referent);
}

// A SoftReference is "softly live" when the active policy decides not to
// clear it this cycle; non-soft references are never softly live.
bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
  if (type != REF_SOFT) {
    // Not a SoftReference
    return false;
  }

  // Ask SoftReference policy
  const jlong clock = java_lang_ref_SoftReference::clock();
  assert(clock != 0, "Clock not initialized");
  assert(_soft_reference_policy != nullptr, "Policy not initialized");
  return !_soft_reference_policy->should_clear_reference(reference, clock);
}

// Decide whether a Reference encountered during marking should be put on a
// discovered list: it must be active, its referent must not be strongly or
// softly live, and the referent must lie in the generation being collected.
template <typename T>
bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
  T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference);
  T heap_oop = RawAccess<>::oop_load(referent_addr);
  oop referent = CompressedOops::decode(heap_oop);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (is_inactive<T>(reference, referent, type)) {
    log_trace(gc,ref)("Reference inactive: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (is_strongly_live(referent)) {
    log_trace(gc,ref)("Reference strongly live: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (is_softly_live(reference, type)) {
    log_trace(gc,ref)("Reference softly live: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (!heap->is_in_active_generation(referent)) {
    log_trace(gc,ref)("Referent outside of active generation: " PTR_FORMAT, p2i(referent));
    return false;
  }

  return true;
}

// Decide, after marking has completed, whether a discovered Reference should
// be dropped from the list rather than enqueued: dropped if the application
// already cleared it, or if the referent turned out to be alive. Phantom
// references tolerate finalizably-marked referents; others require strong.
template <typename T>
bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
  const oop referent = reference_referent<T>(reference);
  if (referent == nullptr) {
    // Reference has been cleared, by a call to Reference.enqueue()
    // or Reference.clear() from the application, which means we
    // should drop the reference.
    return true;
  }

  // Check if the referent is still alive, in which case we should
  // drop the reference.
  if (type == REF_PHANTOM) {
    return ShenandoahHeap::heap()->complete_marking_context()->is_marked(referent);
  } else {
    return ShenandoahHeap::heap()->complete_marking_context()->is_marked_strong(referent);
  }
}

// Transition a Reference that will be enqueued into the inactive state.
template <typename T>
void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
  if (type == REF_FINAL) {
    // Don't clear referent. It is needed by the Finalizer thread to make the call
    // to finalize(). A FinalReference is instead made inactive by self-looping the
    // next field. An application can't call FinalReference.enqueue(), so there is
    // no race to worry about when setting the next field.
    assert(reference_next<T>(reference) == nullptr, "Already inactive");
    assert(ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent<T>(reference)), "only make inactive final refs with alive referents");
    reference_set_next(reference, reference);
  } else {
    // Clear referent
    reference_clear_referent(reference);
  }
}

// Attempt to discover a Reference during marking on behalf of 'worker_id'.
// Returns true if the Reference is (or already was) discovered, meaning the
// marker must NOT follow the referent; false means treat it as a normal
// object. Discovery races between workers are resolved by a CAS on the
// 'discovered' field.
template <typename T>
bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, uint worker_id) {
  if (!should_discover<T>(reference, type)) {
    // Not discovered
    return false;
  }

  if (reference_discovered<T>(reference) != nullptr) {
    // Already discovered. This can happen if the reference is marked finalizable first, and then strong,
    // in which case it will be seen 2x by marking.
    log_trace(gc,ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
    return true;
  }

  if (type == REF_FINAL) {
    // The referent of a FinalReference must stay reachable for the Finalizer
    // thread; mark through it weakly using this worker's mark closure.
    ShenandoahMarkRefsSuperClosure* cl = _ref_proc_thread_locals[worker_id].mark_closure();
    bool weak = cl->is_weak();
    cl->set_weak(true);
    if (UseCompressedOops) {
      cl->do_oop(reinterpret_cast<narrowOop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    } else {
      cl->do_oop(reinterpret_cast<oop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    }
    cl->set_weak(weak);
  }

  // Add reference to discovered list
  // Each worker thread has a private copy of refproc_data, which includes a private discovered list. This means
  // there's no risk that a different worker thread will try to manipulate my discovered list head while I'm making
  // reference the head of my discovered list.
  ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
  oop discovered_head = refproc_data.discovered_list_head<T>();
  if (discovered_head == nullptr) {
    // Self-loop tail of list. We distinguish discovered from not-discovered references by looking at their
    // discovered field: if it is null, then it is not-yet discovered, otherwise it is discovered
    discovered_head = reference;
  }
  if (reference_cas_discovered<T>(reference, discovered_head)) {
    // We successfully set this reference object's next pointer to discovered_head. This marks reference as discovered.
    // If reference_cas_discovered fails, that means some other worker thread took credit for discovery of this reference,
    // and that other thread will place reference on its discovered list, so I can ignore reference.

    // In case we have created an interesting pointer, mark the remembered set card as dirty.
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    if (ShenandoahCardBarrier) {
      T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
      card_mark_barrier(addr, discovered_head);
    }

    // Make the discovered_list_head point to reference.
    refproc_data.set_discovered_list_head<T>(reference);
    assert(refproc_data.discovered_list_head<T>() == reference, "reference must be new discovered head");
    log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
    _ref_proc_thread_locals[worker_id].inc_discovered(type);
  }
  return true;
}

// Entry point called from marking when a Reference object is encountered.
// Dispatches to the compressed/uncompressed specialization of discover().
bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
  if (!RegisterReferences) {
    // Reference processing disabled
    return false;
  }

  log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s, %s)",
          p2i(reference), reference_type_name(type), ShenandoahHeap::heap()->heap_region_containing(reference)->affiliation_name());
  uint worker_id = WorkerThread::worker_id();
  _ref_proc_thread_locals[worker_id].inc_encountered(type);

  if (UseCompressedOops) {
    return discover<narrowOop>(reference, type, worker_id);
  } else {
    return discover<oop>(reference, type, worker_id);
  }
}

// Unlink 'reference' from the discovered list (its referent is still alive or
// was cleared by the application) and return the next Reference on the list.
template <typename T>
oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  oop referent = reference_referent<T>(reference);
  assert(referent == nullptr || heap->marking_context()->is_marked(referent), "only drop references with alive referents");

  // Unlink and return next in list
  oop next = reference_discovered<T>(reference);
  reference_set_discovered<T>(reference, nullptr);
  // When this reference was discovered, it would not have been marked. If it ends up surviving
  // the cycle, we need to dirty the card if the reference is old and the referent is young. Note
  // that if the reference is not dropped, then its pointer to the referent will be nulled before
  // evacuation begins so card does not need to be dirtied.
  if (heap->mode()->is_generational() && heap->is_in_old(reference) && heap->is_in_young(referent)) {
    // Note: would be sufficient to mark only the card that holds the start of this Reference object.
    heap->card_scan()->mark_range_as_dirty(cast_from_oop<HeapWord*>(reference), reference->size());
  }
  return next;
}

// Keep 'reference' on the list for enqueueing: make it inactive, bump the
// worker's statistics, and return the address of its 'discovered' field so
// the caller can continue walking the list in place.
template <typename T>
T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint worker_id) {
  log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _ref_proc_thread_locals[worker_id].inc_enqueued(type);

  // Make reference inactive
  make_inactive<T>(reference, type);

  // Return next in list
  return reference_discovered_addr<T>(reference);
}

// Walk one worker's discovered list: drop references whose referents survived
// (or were cleared), keep the rest, then prepend the survivors onto the
// processor-wide _pending_list with a single atomic exchange.
template <typename T>
void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
  log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head<T>()));
  T* list = refproc_data.discovered_list_addr<T>();
  // The list head is basically a GC root, we need to resolve and update it,
  // otherwise we will later swap a from-space ref into Universe::pending_list().
  if (!CompressedOops::is_null(*list)) {
    oop first_resolved = lrb(CompressedOops::decode_not_null(*list));
    set_oop_field(list, first_resolved);
  }
  T* p = list;
  while (true) {
    const oop reference = lrb(CompressedOops::decode(*p));
    if (reference == nullptr) {
      break;
    }
    log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
    const ReferenceType type = reference_type(reference);

    if (should_drop<T>(reference, type)) {
      set_oop_field(p, drop<T>(reference, type));
    } else {
      p = keep<T>(reference, type, worker_id);
    }

    const oop discovered = lrb(reference_discovered<T>(reference));
    if (reference == discovered) {
      // Reset terminating self-loop to null
      reference_set_discovered<T>(reference, oop(nullptr));
      break;
    }
  }

  // Prepend discovered references to internal pending list
  // set_oop_field maintains the card mark barrier as this list is constructed.
  if (!CompressedOops::is_null(*list)) {
    oop head = lrb(CompressedOops::decode_not_null(*list));
    shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
    oop prev = Atomic::xchg(&_pending_list, head);
    // 'p' still points at the last 'discovered' slot of this worker's list;
    // linking it to the previous pending-list head splices the lists together.
    set_oop_field(p, prev);
    if (prev == nullptr) {
      // First to prepend to list, record tail
      _pending_list_tail = reinterpret_cast<void*>(p);
    }

    // Clear discovered list
    set_oop_field(list, oop(nullptr));
  }
}

// Worker loop: claim discovered lists one at a time via an atomic counter
// until all per-worker lists have been processed. Note that the claimed list
// index need not match the executing worker's own id.
void ShenandoahReferenceProcessor::work() {
  // Process discovered references
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  uint worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  while (worker_id < max_workers) {
    if (UseCompressedOops) {
      process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
    } else {
      process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
    }
    worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  }
}

// WorkerTask adapter that runs ShenandoahReferenceProcessor::work() on each
// GC worker, under either a concurrent or a parallel (STW) worker session.
class ShenandoahReferenceProcessorTask : public WorkerTask {
private:
  bool const _concurrent;
  ShenandoahPhaseTimings::Phase const _phase;
  ShenandoahReferenceProcessor* const _reference_processor;

public:
  ShenandoahReferenceProcessorTask(ShenandoahPhaseTimings::Phase phase, bool concurrent, ShenandoahReferenceProcessor* reference_processor) :
    WorkerTask("ShenandoahReferenceProcessorTask"),
    _concurrent(concurrent),
    _phase(phase),
    _reference_processor(reference_processor) {
  }

  virtual void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    }
  }
};

// Top-level driver: process all discovered lists with the given worker pool,
// update the SoftReference clock, gather statistics, and hand the surviving
// references over to the Java-side pending list.
void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {

  // Reset the list-claiming counter; release fence publishes thread-local
  // state to the workers about to start.
  Atomic::release_store_fence(&_iterate_discovered_list_id, 0U);

  // Process discovered lists
  ShenandoahReferenceProcessorTask task(phase, concurrent, this);
  workers->run_task(&task);

  // Update SoftReference clock
  soft_reference_update_clock();

  // Collect, log and trace statistics
  collect_statistics();

  enqueue_references(concurrent);
}

// Splice the internal pending list onto the global Universe pending list.
// Caller must hold the appropriate lock (Heap_lock, or be inside a VM op).
void ShenandoahReferenceProcessor::enqueue_references_locked() {
  // Prepend internal pending list to external pending list
  shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);

  // During reference processing, we maintain a local list of references that are identified by
  // _pending_list and _pending_list_tail. _pending_list_tail points to the next field of the last Reference object on
  // the local list.
  //
  // There is also a global list of reference identified by Universe::_reference_pending_list

  // The following code has the effect of:
  // 1. Making the global Universe::_reference_pending_list point to my local list
  // 2. Overwriting the next field of the last Reference on my local list to point at the previous head of the
  //    global Universe::_reference_pending_list

  oop former_head_of_global_list = Universe::swap_reference_pending_list(_pending_list);
  if (UseCompressedOops) {
    set_oop_field<narrowOop>(reinterpret_cast<narrowOop*>(_pending_list_tail), former_head_of_global_list);
  } else {
    set_oop_field<oop>(reinterpret_cast<oop*>(_pending_list_tail), former_head_of_global_list);
  }
}

// Publish the internal pending list to the application: take Heap_lock and
// notify the ReferenceHandler thread when running concurrently; rely on the
// VM operation's mutual exclusion otherwise. Resets the internal list.
void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
  if (_pending_list == nullptr) {
    // Nothing to enqueue
    return;
  }
  if (!concurrent) {
    // When called from mark-compact or degen-GC, the locking is done by the VMOperation,
    enqueue_references_locked();
  } else {
    // Heap_lock protects external pending list
    MonitorLocker ml(Heap_lock);

    enqueue_references_locked();

    // Notify ReferenceHandler thread
    ml.notify_all();
  }

  // Reset internal pending list
  _pending_list = nullptr;
  _pending_list_tail = &_pending_list;
}

// Walk a discovered list, nulling each 'discovered' link so every Reference
// on it reverts to the not-discovered state. set_oop_field keeps the card
// marks consistent while unlinking.
template<typename T>
void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
  T discovered = *list;
  while (!CompressedOops::is_null(discovered)) {
    oop discovered_ref = CompressedOops::decode_not_null(discovered);
    set_oop_field<T>(list, oop(nullptr));
    list = reference_discovered_addr<T>(discovered_ref);
    discovered = *list;
  }
}

// Undo discovery after a cancelled/degenerated cycle: clear every worker's
// discovered list and any partially-built internal pending list.
void ShenandoahReferenceProcessor::abandon_partial_discovery() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint index = 0; index < max_workers; index++) {
    if (UseCompressedOops) {
      clean_discovered_list<narrowOop>(_ref_proc_thread_locals[index].discovered_list_addr<narrowOop>());
    } else {
      clean_discovered_list<oop>(_ref_proc_thread_locals[index].discovered_list_addr<oop>());
    }
  }
  if (_pending_list != nullptr) {
    oop pending = _pending_list;
    _pending_list = nullptr;
    if (UseCompressedOops) {
      narrowOop* list = reference_discovered_addr<narrowOop>(pending);
      clean_discovered_list<narrowOop>(list);
    } else {
      oop* list = reference_discovered_addr<oop>(pending);
      clean_discovered_list<oop>(list);
    }
  }
  _pending_list_tail = &_pending_list;
}

// Sum the per-worker encountered/discovered/enqueued counters, publish the
// discovered totals as ReferenceProcessorStats, and log all three tallies.
void ShenandoahReferenceProcessor::collect_statistics() {
  Counters encountered = {};
  Counters discovered = {};
  Counters enqueued = {};
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    for (size_t type = 0; type < reference_type_count; type++) {
      encountered[type] += _ref_proc_thread_locals[i].encountered((ReferenceType)type);
      discovered[type] += _ref_proc_thread_locals[i].discovered((ReferenceType)type);
      enqueued[type] += _ref_proc_thread_locals[i].enqueued((ReferenceType)type);
    }
  }

  _stats = ReferenceProcessorStats(discovered[REF_SOFT],
                                   discovered[REF_WEAK],
                                   discovered[REF_FINAL],
                                   discovered[REF_PHANTOM]);

  log_info(gc,ref)("Encountered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                   encountered[REF_SOFT], encountered[REF_WEAK], encountered[REF_FINAL], encountered[REF_PHANTOM]);
  log_info(gc,ref)("Discovered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                   discovered[REF_SOFT], discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]);
  log_info(gc,ref)("Enqueued references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                   enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]);
}