1 /* 2 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates. 4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6 * 7 * This code is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 only, as 9 * published by the Free Software Foundation. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "logging/log.hpp"

// Returns the ReferenceType (REF_SOFT/REF_WEAK/REF_FINAL/REF_PHANTOM) of the
// given java.lang.ref.Reference instance, as recorded in its InstanceKlass.
static ReferenceType reference_type(oop reference) {
  return InstanceKlass::cast(reference->klass())->reference_type();
}

// Human-readable name for a ReferenceType; used only for logging.
static const char* reference_type_name(ReferenceType type) {
  switch (type) {
    case REF_SOFT:
      return "Soft";

    case REF_WEAK:
      return "Weak";

    case REF_FINAL:
      return "Final";

    case REF_PHANTOM:
      return "Phantom";

    default:
      ShouldNotReachHere();
      return nullptr;
  }
}

// Dirty the remembered-set card covering 'field' when a store creates an
// old-to-young pointer (field in old gen, value in young gen).
template <typename T>
static void card_mark_barrier(T* field, oop value) {
  assert(ShenandoahCardBarrier, "Card-mark barrier should be on");
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  assert(heap->is_in_or_null(value), "Should be in heap");
  if (heap->is_in_old(field) && heap->is_in_young(value)) {
    // For Shenandoah, each generation collects all the _referents_ that belong to the
    // collected generation. We can end up with discovered lists that contain a mixture
    // of old and young _references_. These references are linked together through the
    // discovered field in java.lang.Reference. In some cases, creating or editing this
    // list may result in the creation of _new_ old-to-young pointers which must dirty
    // the corresponding card. Failing to do this may cause heap verification errors and
    // lead to incorrect GC behavior.
    heap->old_generation()->mark_card_as_dirty(field);
  }
}

// Raw store of 'value' into 'field', followed by the card-mark barrier when
// enabled. Specialized below for uncompressed and compressed oops.
template <typename T>
static void set_oop_field(T* field, oop value);

template <>
void set_oop_field<oop>(oop* field, oop value) {
  *field = value;
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

template <>
void set_oop_field<narrowOop>(narrowOop* field, oop value) {
  *field = CompressedOops::encode(value);
  if (ShenandoahCardBarrier) {
    card_mark_barrier(field, value);
  }
}

// Apply the load-reference barrier only to marked (known-alive) objects;
// unmarked objects are returned unchanged, since they may be dead and must
// not be passed through the barrier.
static oop lrb(oop obj) {
  if (obj != nullptr && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
    return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
  } else {
    return obj;
  }
}

// Address of the 'referent' field of a java.lang.ref.Reference, as a volatile
// pointer because the field may be updated concurrently.
template <typename T>
static volatile T* reference_referent_addr(oop reference) {
  return (volatile T*)java_lang_ref_Reference::referent_addr_raw(reference);
}

// Decode a possibly-dead compressed oop without oop-ness verification.
inline oop reference_coop_decode_raw(narrowOop v) {
  return CompressedOops::is_null(v) ? nullptr : CompressedOops::decode_raw(v);
}

inline oop reference_coop_decode_raw(oop v) {
  return v;
}

// Raw referent, it can be dead. You cannot treat it as oop without additional safety
// checks, this is why it is HeapWord*. The decoding uses a special-case inlined
// CompressedOops::decode method that bypasses normal oop-ness checks.
template <typename T>
static HeapWord* reference_referent_raw(oop reference) {
  T raw_oop = Atomic::load(reference_referent_addr<T>(reference));
  return cast_from_oop<HeapWord*>(reference_coop_decode_raw(raw_oop));
}

// Clear the 'referent' field with a raw (barrier-free) store of null.
static void reference_clear_referent(oop reference) {
  java_lang_ref_Reference::clear_referent_raw(reference);
}

// Address of the 'discovered' field of a java.lang.ref.Reference.
template <typename T>
static T* reference_discovered_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
}

// Load the 'discovered' field, applying the load-reference barrier to
// marked objects (see lrb() above).
template <typename T>
static oop reference_discovered(oop reference) {
  T heap_oop = *reference_discovered_addr<T>(reference);
  return lrb(CompressedOops::decode(heap_oop));
}

// Raw store to the 'discovered' field; specialized for uncompressed and
// compressed oops.
template <typename T>
static void reference_set_discovered(oop reference, oop discovered);

template <>
void reference_set_discovered<oop>(oop reference, oop discovered) {
  *reference_discovered_addr<oop>(reference) = discovered;
}

template <>
void reference_set_discovered<narrowOop>(oop reference, oop discovered) {
  *reference_discovered_addr<narrowOop>(reference) = CompressedOops::encode(discovered);
}

// Atomically install 'discovered' into the 'discovered' field iff it is still
// null. Returns true when this thread's CAS wins, i.e. this thread takes
// credit for discovering the reference.
template<typename T>
static bool reference_cas_discovered(oop reference, oop discovered) {
  T* addr = reinterpret_cast<T *>(java_lang_ref_Reference::discovered_addr_raw(reference));
  return ShenandoahHeap::atomic_update_oop_check(discovered, addr, nullptr);
}

// Address of the 'next' field of a java.lang.ref.Reference.
template <typename T>
static T* reference_next_addr(oop reference) {
  return reinterpret_cast<T*>(java_lang_ref_Reference::next_addr_raw(reference));
}

// Load the 'next' field, applying the load-reference barrier to marked objects.
template <typename T>
static oop reference_next(oop reference) {
  T heap_oop = RawAccess<>::oop_load(reference_next_addr<T>(reference));
  return lrb(CompressedOops::decode(heap_oop));
}

// Raw store to the 'next' field (used to self-loop a FinalReference when
// making it inactive; see make_inactive()).
static void reference_set_next(oop reference, oop next) {
  java_lang_ref_Reference::set_next_raw(reference, next);
}

// Advance the global SoftReference clock to the current time in milliseconds.
static void soft_reference_update_clock() {
  const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  java_lang_ref_SoftReference::set_clock(now);
}

ShenandoahRefProcThreadLocal::ShenandoahRefProcThreadLocal() :
  _discovered_list(nullptr),
  _encountered_count(),
  _discovered_count(),
  _enqueued_count() {
}

// Reset this worker's discovered list, mark closure, and per-type statistics counters.
void ShenandoahRefProcThreadLocal::reset() {
  _discovered_list = nullptr;
  _mark_closure = nullptr;
  for (uint i = 0; i < reference_type_count; i++) {
    _encountered_count[i] = 0;
    _discovered_count[i] = 0;
    _enqueued_count[i] = 0;
  }
}

// View the discovered-list head slot as a T* (oop* or narrowOop*).
template <typename T>
T* ShenandoahRefProcThreadLocal::discovered_list_addr() {
  return reinterpret_cast<T*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<oop>() const {
  return *reinterpret_cast<const oop*>(&_discovered_list);
}

template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<narrowOop>() const {
  return CompressedOops::decode(*reinterpret_cast<const narrowOop*>(&_discovered_list));
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<narrowOop>(oop head) {
  *discovered_list_addr<narrowOop>() = CompressedOops::encode(head);
}

template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<oop>(oop head) {
  *discovered_list_addr<oop>() = head;
}

ShenandoahReferenceProcessor::ShenandoahReferenceProcessor(uint max_workers) :
  _soft_reference_policy(nullptr),
  _ref_proc_thread_locals(NEW_C_HEAP_ARRAY(ShenandoahRefProcThreadLocal, max_workers, mtGC)),
  _pending_list(nullptr),
  _pending_list_tail(&_pending_list),
  _iterate_discovered_list_id(0U),
  _stats() {
  for (size_t i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

void ShenandoahReferenceProcessor::reset_thread_locals() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    _ref_proc_thread_locals[i].reset();
  }
}

void ShenandoahReferenceProcessor::set_mark_closure(uint worker_id, ShenandoahMarkRefsSuperClosure* mark_closure) {
  _ref_proc_thread_locals[worker_id].set_mark_closure(mark_closure);
}

// Select the SoftReference clearing policy for this cycle: clear everything,
// or clear by LRU age relative to max heap (the default).
void ShenandoahReferenceProcessor::set_soft_reference_policy(bool clear) {
  static AlwaysClearPolicy always_clear_policy;
  static LRUMaxHeapPolicy lru_max_heap_policy;

  if (clear) {
    log_info(gc, ref)("Clearing All SoftReferences");
    _soft_reference_policy = &always_clear_policy;
  } else {
    _soft_reference_policy = &lru_max_heap_policy;
  }

  _soft_reference_policy->setup();
}

template <typename T>
bool ShenandoahReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
  if (type == REF_FINAL) {
    // A FinalReference is inactive if its next field is non-null. An application can't
    // call enqueue() or clear() on a FinalReference.
    return reference_next<T>(reference) != nullptr;
  } else {
    // A non-FinalReference is inactive if the referent is null. The referent can only
    // be null if the application called Reference.enqueue() or Reference.clear().
    return referent == nullptr;
  }
}

bool ShenandoahReferenceProcessor::is_strongly_live(oop referent) const {
  return ShenandoahHeap::heap()->marking_context()->is_marked_strong(referent);
}

bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
  if (type != REF_SOFT) {
    // Not a SoftReference
    return false;
  }

  // Ask SoftReference policy
  const jlong clock = java_lang_ref_SoftReference::clock();
  assert(clock != 0, "Clock not initialized");
  assert(_soft_reference_policy != nullptr, "Policy not initialized");
  return !_soft_reference_policy->should_clear_reference(reference, clock);
}

// Decide whether 'reference' should go on a discovered list: it must still be
// active, its referent must not be strongly (or, for soft refs, softly) live,
// and the referent must belong to the generation currently being collected.
template <typename T>
bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
  T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference);
  T heap_oop = RawAccess<>::oop_load(referent_addr);
  oop referent = CompressedOops::decode(heap_oop);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (is_inactive<T>(reference, referent, type)) {
    log_trace(gc,ref)("Reference inactive: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (is_strongly_live(referent)) {
    log_trace(gc,ref)("Reference strongly live: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (is_softly_live(reference, type)) {
    log_trace(gc,ref)("Reference softly live: " PTR_FORMAT, p2i(reference));
    return false;
  }

  if (!heap->is_in_active_generation(referent)) {
    log_trace(gc,ref)("Referent outside of active generation: " PTR_FORMAT, p2i(referent));
    return false;
  }

  return true;
}

// Decide whether a discovered reference should be dropped from its list:
// either the application already cleared the referent, or marking found the
// referent alive (strongly, except for phantom references).
template <typename T>
bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
  HeapWord* raw_referent = reference_referent_raw<T>(reference);
  if (raw_referent == nullptr) {
    // Reference has been cleared, by a call to Reference.enqueue()
    // or Reference.clear() from the application, which means we
    // should drop the reference.
    return true;
  }

  // Check if the referent is still alive, in which case we should
  // drop the reference.
  if (type == REF_PHANTOM) {
    return ShenandoahHeap::heap()->complete_marking_context()->is_marked(raw_referent);
  } else {
    return ShenandoahHeap::heap()->complete_marking_context()->is_marked_strong(raw_referent);
  }
}

template <typename T>
void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
  if (type == REF_FINAL) {
    // Don't clear referent. It is needed by the Finalizer thread to make the call
    // to finalize(). A FinalReference is instead made inactive by self-looping the
    // next field. An application can't call FinalReference.enqueue(), so there is
    // no race to worry about when setting the next field.
    assert(reference_next<T>(reference) == nullptr, "Already inactive");
    assert(ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent_raw<T>(reference)), "only make inactive final refs with alive referents");
    reference_set_next(reference, reference);
  } else {
    // Clear referent
    reference_clear_referent(reference);
  }
}

// Attempt to put 'reference' on this worker's private discovered list.
// Returns true when the reference is (or already was) discovered, so the
// marking closure should not treat the referent as strongly reachable.
template <typename T>
bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, uint worker_id) {
  if (!should_discover<T>(reference, type)) {
    // Not discovered
    return false;
  }

  if (reference_discovered<T>(reference) != nullptr) {
    // Already discovered. This can happen if the reference is marked finalizable first, and then strong,
    // in which case it will be seen 2x by marking.
    log_trace(gc,ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
    return true;
  }

  if (type == REF_FINAL) {
    // Mark through the referent with the closure's weak flag set, so the referent
    // stays reachable for the Finalizer thread (see make_inactive above).
    ShenandoahMarkRefsSuperClosure* cl = _ref_proc_thread_locals[worker_id].mark_closure();
    bool weak = cl->is_weak();
    cl->set_weak(true);
    if (UseCompressedOops) {
      cl->do_oop(reinterpret_cast<narrowOop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    } else {
      cl->do_oop(reinterpret_cast<oop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
    }
    cl->set_weak(weak);
  }

  // Add reference to discovered list
  // Each worker thread has a private copy of refproc_data, which includes a private discovered list. This means
  // there's no risk that a different worker thread will try to manipulate my discovered list head while I'm making
  // reference the head of my discovered list.
  ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
  oop discovered_head = refproc_data.discovered_list_head<T>();
  if (discovered_head == nullptr) {
    // Self-loop tail of list. We distinguish discovered from not-discovered references by looking at their
    // discovered field: if it is null, then it is not-yet discovered, otherwise it is discovered
    discovered_head = reference;
  }
  if (reference_cas_discovered<T>(reference, discovered_head)) {
    // We successfully set this reference object's next pointer to discovered_head. This marks reference as discovered.
    // If reference_cas_discovered fails, that means some other worker thread took credit for discovery of this reference,
    // and that other thread will place reference on its discovered list, so I can ignore reference.

    // In case we have created an interesting pointer, mark the remembered set card as dirty.
    if (ShenandoahCardBarrier) {
      T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
      card_mark_barrier(addr, discovered_head);
    }

    // Make the discovered_list_head point to reference.
    refproc_data.set_discovered_list_head<T>(reference);
    assert(refproc_data.discovered_list_head<T>() == reference, "reference must be new discovered head");
    log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
    _ref_proc_thread_locals[worker_id].inc_discovered(type);
  }
  return true;
}

// Entry point called during marking for every Reference object encountered.
bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
  if (!RegisterReferences) {
    // Reference processing disabled
    return false;
  }

  log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s, %s)",
          p2i(reference), reference_type_name(type), ShenandoahHeap::heap()->heap_region_containing(reference)->affiliation_name());
  uint worker_id = WorkerThread::worker_id();
  _ref_proc_thread_locals[worker_id].inc_encountered(type);

  if (UseCompressedOops) {
    return discover<narrowOop>(reference, type, worker_id);
  } else {
    return discover<oop>(reference, type, worker_id);
  }
}

// Unlink 'reference' from the discovered list (its referent is alive or was
// already cleared) and return the next element of the list.
template <typename T>
oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  HeapWord* raw_referent = reference_referent_raw<T>(reference);

#ifdef ASSERT
  assert(raw_referent == nullptr || ShenandoahHeap::heap()->marking_context()->is_marked(raw_referent),
         "only drop references with alive referents");
#endif

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Unlink and return next in list
  oop next = reference_discovered<T>(reference);
  reference_set_discovered<T>(reference, nullptr);
  // When this reference was discovered, it would not have been marked. If it ends up surviving
  // the cycle, we need to dirty the card if the reference is old and the referent is young. Note
  // that if the reference is not dropped, then its pointer to the referent will be nulled before
  // evacuation begins so card does not need to be dirtied.
  if (heap->mode()->is_generational() && heap->is_in_old(reference) && heap->is_in_young(raw_referent)) {
    // Note: would be sufficient to mark only the card that holds the start of this Reference object.
    heap->old_generation()->card_scan()->mark_range_as_dirty(cast_from_oop<HeapWord*>(reference), reference->size());
  }
  return next;
}

// Keep 'reference' on the list (its referent is dead): make it inactive and
// return the address of its discovered field, which links to the next element.
template <typename T>
T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint worker_id) {
  log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _ref_proc_thread_locals[worker_id].inc_enqueued(type);

  // Make reference inactive
  make_inactive<T>(reference, type);

  // Return next in list
  return reference_discovered_addr<T>(reference);
}

// Walk one worker's discovered list: drop references whose referents survived,
// keep (make inactive) the rest, then prepend the kept references to the
// shared internal pending list.
template <typename T>
void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
  log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head<T>()));
  T* list = refproc_data.discovered_list_addr<T>();
  // The list head is basically a GC root, we need to resolve and update it,
  // otherwise we will later swap a from-space ref into Universe::pending_list().
  if (!CompressedOops::is_null(*list)) {
    oop first_resolved = lrb(CompressedOops::decode_not_null(*list));
    set_oop_field(list, first_resolved);
  }
  T* p = list;
  while (true) {
    const oop reference = lrb(CompressedOops::decode(*p));
    if (reference == nullptr) {
      break;
    }
    log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
    const ReferenceType type = reference_type(reference);

    if (should_drop<T>(reference, type)) {
      set_oop_field(p, drop<T>(reference, type));
    } else {
      p = keep<T>(reference, type, worker_id);
    }

    const oop discovered = lrb(reference_discovered<T>(reference));
    if (reference == discovered) {
      // Reset terminating self-loop to null
      reference_set_discovered<T>(reference, oop(nullptr));
      break;
    }
  }

  // Prepend discovered references to internal pending list
  // set_oop_field maintains the card mark barrier as this list is constructed.
  if (!CompressedOops::is_null(*list)) {
    oop head = lrb(CompressedOops::decode_not_null(*list));
    shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
    oop prev = Atomic::xchg(&_pending_list, head);
    set_oop_field(p, prev);
    if (prev == nullptr) {
      // First to prepend to list, record tail
      _pending_list_tail = reinterpret_cast<void*>(p);
    }

    // Clear discovered list
    set_oop_field(list, oop(nullptr));
  }
}

// Worker loop: claim per-worker discovered lists via an atomic counter until
// every list has been processed. Multiple GC workers may run this in parallel.
void ShenandoahReferenceProcessor::work() {
  // Process discovered references
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  uint worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  while (worker_id < max_workers) {
    if (UseCompressedOops) {
      process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
    } else {
      process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
    }
    worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
  }
}

// WorkerTask wrapper that runs ShenandoahReferenceProcessor::work() under the
// appropriate worker session (concurrent vs. parallel/at-safepoint) and timing tracker.
class ShenandoahReferenceProcessorTask : public WorkerTask {
private:
  bool const _concurrent;
  ShenandoahPhaseTimings::Phase const _phase;
  ShenandoahReferenceProcessor* const _reference_processor;

public:
  ShenandoahReferenceProcessorTask(ShenandoahPhaseTimings::Phase phase, bool concurrent, ShenandoahReferenceProcessor* reference_processor) :
    WorkerTask("ShenandoahReferenceProcessorTask"),
    _concurrent(concurrent),
    _phase(phase),
    _reference_processor(reference_processor) {
  }

  virtual void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
      _reference_processor->work();
    }
  }
};

// Process all discovered lists with the given workers, then update the
// SoftReference clock, publish statistics, and hand the survivors to the
// java.lang.ref pending list.
void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {

  Atomic::release_store_fence(&_iterate_discovered_list_id, 0U);

  // Process discovered lists
  ShenandoahReferenceProcessorTask task(phase, concurrent, this);
  workers->run_task(&task);

  // Update SoftReference clock
  soft_reference_update_clock();

  // Collect, log and trace statistics
  collect_statistics();

  enqueue_references(concurrent);
}

void ShenandoahReferenceProcessor::enqueue_references_locked() {
  // Prepend internal pending list to external pending list
  shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);

  // During reference processing, we maintain a local list of references that are identified by
  // _pending_list and _pending_list_tail. _pending_list_tail points to the next field of the last Reference object on
  // the local list.
  //
  // There is also a global list of references identified by Universe::_reference_pending_list

  // The following code has the effect of:
  // 1. Making the global Universe::_reference_pending_list point to my local list
  // 2. Overwriting the next field of the last Reference on my local list to point at the previous head of the
  //    global Universe::_reference_pending_list

  oop former_head_of_global_list = Universe::swap_reference_pending_list(_pending_list);
  if (UseCompressedOops) {
    set_oop_field<narrowOop>(reinterpret_cast<narrowOop*>(_pending_list_tail), former_head_of_global_list);
  } else {
    set_oop_field<oop>(reinterpret_cast<oop*>(_pending_list_tail), former_head_of_global_list);
  }
}

void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
  if (_pending_list == nullptr) {
    // Nothing to enqueue
    return;
  }
  if (!concurrent) {
    // When called from mark-compact or degen-GC, the locking is done by the VMOperation.
    enqueue_references_locked();
  } else {
    // Heap_lock protects external pending list
    MonitorLocker ml(Heap_lock);

    enqueue_references_locked();

    // Notify ReferenceHandler thread
    ml.notify_all();
  }

  // Reset internal pending list
  _pending_list = nullptr;
  _pending_list_tail = &_pending_list;
}

// Walk a discovered list and null out each element's discovered field,
// returning every reference on it to the not-yet-discovered state.
template<typename T>
void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
  T discovered = *list;
  while (!CompressedOops::is_null(discovered)) {
    oop discovered_ref = CompressedOops::decode_not_null(discovered);
    set_oop_field<T>(list, oop(nullptr));
    list = reference_discovered_addr<T>(discovered_ref);
    discovered = *list;
  }
}

// Undo partial discovery (e.g. when a cycle is abandoned): clear every
// worker's discovered list and the internal pending list.
void ShenandoahReferenceProcessor::abandon_partial_discovery() {
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint index = 0; index < max_workers; index++) {
    if (UseCompressedOops) {
      clean_discovered_list<narrowOop>(_ref_proc_thread_locals[index].discovered_list_addr<narrowOop>());
    } else {
      clean_discovered_list<oop>(_ref_proc_thread_locals[index].discovered_list_addr<oop>());
    }
  }
  if (_pending_list != nullptr) {
    oop pending = _pending_list;
    _pending_list = nullptr;
    if (UseCompressedOops) {
      narrowOop* list = reference_discovered_addr<narrowOop>(pending);
      clean_discovered_list<narrowOop>(list);
    } else {
      oop* list = reference_discovered_addr<oop>(pending);
      clean_discovered_list<oop>(list);
    }
  }
  _pending_list_tail = &_pending_list;
}

// Sum the per-worker counters, publish them as ReferenceProcessorStats, and
// log per-type totals.
void ShenandoahReferenceProcessor::collect_statistics() {
  Counters encountered = {};
  Counters discovered = {};
  Counters enqueued = {};
  uint max_workers = ShenandoahHeap::heap()->max_workers();
  for (uint i = 0; i < max_workers; i++) {
    for (size_t type = 0; type < reference_type_count; type++) {
      encountered[type] += _ref_proc_thread_locals[i].encountered((ReferenceType)type);
      discovered[type] += _ref_proc_thread_locals[i].discovered((ReferenceType)type);
      enqueued[type] += _ref_proc_thread_locals[i].enqueued((ReferenceType)type);
    }
  }

  _stats = ReferenceProcessorStats(discovered[REF_SOFT],
                                   discovered[REF_WEAK],
                                   discovered[REF_FINAL],
                                   discovered[REF_PHANTOM]);

  log_info(gc,ref)("Encountered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                   encountered[REF_SOFT], encountered[REF_WEAK], encountered[REF_FINAL], encountered[REF_PHANTOM]);
  log_info(gc,ref)("Discovered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                   discovered[REF_SOFT], discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]);
  log_info(gc,ref)("Enqueued references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
                   enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]);
}