/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "gc_implementation/shared/gcTrace.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"

class GCTimer;

// The ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _oop_head(NULL), _compressed_head(0), _len(0) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool   is_empty() const       { return head() == NULL; }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
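
// Example (an illustrative sketch, not part of this interface): discovery
// prepends a Reference to a DiscoveredList by chaining it through the
// Reference's discovered field. The last ref on a list points to itself
// rather than NULL, so a NULL discovered field still means "not yet
// discovered". Single-threaded flavor shown; the MT flavor must CAS the
// head (see ReferenceProcessor::add_to_discovered_list_mt below).
//
//   oop current_head = list.head();
//   oop next_discovered = (current_head != NULL) ? current_head : obj;
//   java_lang_ref_Reference::set_discovered(obj, next_discovered);
//   list.set_head(obj);
//   list.inc_length(1);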

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;

  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
  { }

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list.
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
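
// Typical traversal (a sketch modeled on the phase workers in
// referenceProcessor.cpp): walk a discovered list, dropping entries whose
// referents are still strongly reachable and keeping those referents alive:
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // Referent is reachable after all; remove the Reference
//       // from the list and keep its referent alive.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }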

class ReferenceProcessor : public CHeapObj<mtGC> {

 public:
  size_t total_count(DiscoveredList lists[]);

 protected:
  // Compatibility with pre-4965777 JDKs
  static bool        _pending_list_uses_discovered_field;

  // The SoftReference master timestamp clock
  static jlong       _soft_ref_timestamp_clock;

  MemRegion   _span;                    // (right-open) interval of heap
                                        // subject to weak ref discovery

  bool        _discovering_refs;        // true when discovery enabled
  bool        _discovery_is_atomic;     // if discovery is atomic wrt
                                        // other collectors in configuration
  bool        _discovery_is_mt;         // true if reference discovery is MT.

  bool        _enqueuing_is_done;       // true if all weak references enqueued
  bool        _processing_is_mt;        // true during phases when
                                        // reference processing is MT.
  uint        _next_id;                 // round-robin mod _num_q counter in
                                        // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint            _num_q;
  // The maximum MT'ness degree of the queues below
  uint            _max_num_q;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;
  DiscoveredList* _discoveredCleanerRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_CLEANER - REF_OTHER); }

  uint num_q()                             { return _num_q; }
  uint max_num_q()                         { return _max_num_q; }
  void set_active_mt_degree(uint v)        { _num_q = v; }

  DiscoveredList* discovered_refs()        { return _discovered_refs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }

  // Process references with a certain reachability level.
  size_t process_discovered_reflist(DiscoveredList               refs_lists[],
                                    ReferencePolicy*             policy,
                                    bool                         clear_referent,
                                    BoolObjectClosure*           is_alive,
                                    OopClosure*                  keep_alive,
                                    VoidClosure*                 complete_gc,
                                    AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);
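
  // How the phase methods below fit together (a sketch; the authoritative
  // sequencing is process_discovered_reflist in referenceProcessor.cpp).
  // For each discovered list, conceptually:
  //
  //   if (policy != NULL) {  // only SoftReferences carry a policy
  //     process_phase1(refs_list, policy, is_alive, keep_alive, complete_gc);
  //   }
  //   process_phase2(refs_list, is_alive, keep_alive, complete_gc);
  //   process_phase3(refs_list, clear_referent, is_alive, keep_alive, complete_gc);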
  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&     refs_list,
                      ReferencePolicy*    policy,
                      BoolObjectClosure*  is_alive,
                      OopClosure*         keep_alive,
                      VoidClosure*        complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      GCTimer*           gc_timer,
                                      GCId               gc_id);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL (see the
  // sketch below).
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
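
  // The "active" test described above, written out (a sketch; it assumes the
  // java_lang_ref_Reference accessors declared in javaClasses.hpp):
  //
  //   bool is_active(oop ref) {
  //     // Refs are born active and go inactive once enqueued:
  //     // active <==> next == NULL.
  //     return java_lang_ref_Reference::next(ref) == NULL;
  //   }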

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs);
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done()  { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots
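
  // Typical per-collection flow (a sketch; exact arguments and MT setup vary
  // by collector, and process_discovered_references and
  // enqueue_discovered_references are declared below):
  //
  //   rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
  //   // ... trace the heap; discover_reference() files eligible References ...
  //   ReferenceProcessorStats stats =
  //     rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
  //                                       NULL /* serial */, gc_timer, gc_id);
  //   rp->enqueue_discovered_references();  // chain survivors onto the pending list
  //
  // (process_discovered_references turns discovery back off, so no explicit
  // disable_discovery() call is needed on this path.)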

  // Balance each of the discovered lists.
  void balance_all_queues();
  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure*           is_alive,
                                OopClosure*                  keep_alive,
                                VoidClosure*                 complete_gc,
                                AbstractRefProcTaskExecutor* task_executor,
                                GCTimer*                     gc_timer,
                                GCId                         gc_id);

  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};


// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};
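
// Usage sketch for the scoped utilities above (young_gen_span is a
// hypothetical value; any MemRegion works):
//
//   {
//     ReferenceProcessorSpanMutator rp_mut(rp, young_gen_span);
//     // ... collect with the narrowed span ...
//   }  // destructor restores the saved span
//
// NoRefDiscovery reads the same way, suppressing discovery for its scope.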

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution for the
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

  bool is_empty() const {
    return _ref_processor.total_count(_refs_lists) == 0;
  }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};
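
// Sketch of a concrete ProcessTask, modeled on the RefProcPhase*Task classes
// in referenceProcessor.cpp (the name MyPhase2Task is illustrative only):
//
//   class MyPhase2Task : public AbstractRefProcTaskExecutor::ProcessTask {
//    public:
//     MyPhase2Task(ReferenceProcessor& rp, DiscoveredList lists[])
//       : ProcessTask(rp, lists, false /* marks_oops_alive */) { }
//     virtual void work(unsigned int i, BoolObjectClosure& is_alive,
//                       OopClosure& keep_alive, VoidClosure& complete_gc) {
//       _ref_processor.process_phase2(_refs_lists[i],
//                                     &is_alive, &keep_alive, &complete_gc);
//     }
//   };
//
// An executor's execute(ProcessTask&) then invokes work(i) once per worker,
// for i in [0, _num_q).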

// Abstract reference enqueueing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP