/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
#define SHARE_VM_UTILITIES_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/padded.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.hpp"

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats() { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS
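// Illustrative sketch (not part of the original header): how the per-queue
// statistics are typically consumed when TASKQUEUE_STATS is enabled.
// "st" (an outputStream*) and "q" (a queue with a public "stats" member,
// see TaskQueueSuper below) are placeholder names, and the number of
// header lines is an assumption.
//
//   TaskQueueStats::print_header(1, st);   // first header line
//   TaskQueueStats::print_header(2, st);   // second header line
//   q->stats.print(st);                    // one row of counters for queue q
//   q->stats.reset();                      // clear counters for the next phase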
// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    Age   get()        const volatile { return _data; }
    void  set(Age age) volatile       { _data = age._data; }

    idx_t top() const volatile { return _fields._top; }
    idx_t tag() const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;
  // Add padding to reduce false-sharing cache contention between _bottom and _age.
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(uint));
  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent queues, so the owner thread can reset the state to
    // _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
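  // Illustrative example of the special case above (not from the original
  // source), assuming N == 16: starting from _bottom == 6 and _age.top() == 5
  // (one element), a racing pop_local decrements _bottom to 5 while a
  // successful pop_global increments top to 6.  Then
  //   dirty_size(5, 6) == (5 - 6) & MOD_N_MASK == 15 == N - 1,
  // which size() reports as 0, i.e. the empty queue.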

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks;
  // is_empty() returns true if it does not.
  bool peek()     const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size, for somewhat complicated reasons.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static const uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

//
// GenericTaskQueue implements an ABP, Arora-Blumofe-Plaxton, double-
// ended-queue (deque), intended for use in work stealing.  Queue operations
// are non-blocking.
//
// A queue owner thread performs push() and pop_local() operations on one end
// of the queue, while other threads may steal work using the pop_global()
// method.
//
// The main difference from the original algorithm is that this
// implementation allows wrap-around at the end of its allocated
// storage, which is an array.
//
// The original paper is:
//
// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
// Thread scheduling for multiprogrammed multiprocessors.
// Theory of Computing Systems 34, 2 (2001), 115-144.
//
// The following paper provides a correctness proof and an
// implementation for weakly ordered memory models including (pseudo-)
// code containing memory barriers for a Chase-Lev deque.  Chase-Lev is
// similar to ABP, with the main difference that it allows resizing of the
// underlying storage:
//
// Le, N. M., Pop, A., Cohen, A., and Nardelli, F. Z.
// Correct and efficient work-stealing for weak memory models.
// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
// practice of parallel programming (PPoPP 2013), 69-80.
//
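// Illustrative usage sketch (not part of the original header): the owner
// thread works on the "bottom" end of its own queue while idle threads steal
// from the "top" end via a queue set.  "qset", "worker_id", "seed" and
// "task" are placeholder names; see GenericTaskQueueSet below for steal().
//
//   typedef GenericTaskQueue<oop, mtGC>  Q;
//   typedef GenericTaskQueueSet<Q, mtGC> QSet;
//
//   Q* q = qset->queue(worker_id);       // this thread's own queue
//   if (!q->push(task)) {
//     ...                                // queue full (see OverflowTaskQueue)
//   }
//
//   oop t;
//   while (q->pop_local(t)) {
//     ...                                // process t, possibly pushing more work
//   }
//   if (qset->steal(worker_id, &seed, t)) {
//     ...                                // process a task stolen from another queue
//   }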
template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
  ArrayAllocator<E, F> _array_allocator;
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

  using TaskQueueSuper<N, F>::_bottom;
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(volatile E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(volatile E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // apply the closure to all elements in the task queue
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::initialize() {
  _elems = _array_allocator.allocate(N);
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //               index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    // G1 does its own checking
    assert(UseG1GC || (*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void)const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue.  It will compete with pop_global()
// that will be used by other threads.  The tag age is incremented
// whenever the queue goes empty, which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is empty
  // and top is greater than bottom.  Fix this representation of the empty queue
  // to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
  Age oldAge = _age.get();
  // Architectures with a weak memory model require a barrier here
  // to guarantee that bottom is not older than age,
  // which is crucial for the correctness of the algorithm.
#if !(defined SPARC || defined IA32 || defined AMD64)
  OrderAccess::fence();
#endif
  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::~GenericTaskQueue() {}

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Try to push task t onto the queue only.  Returns true if successful, false otherwise.
  inline bool try_push_to_taskqueue(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::try_push_to_taskqueue(E t) {
  return taskqueue_t::push(t);
}
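// Illustrative usage sketch (not part of the original header): with an
// OverflowTaskQueue, push() never fails; tasks that do not fit in the
// fixed-size queue end up on the overflow stack and must be drained
// separately.  "oq" and "task" are placeholder names.
//
//   OverflowTaskQueue<oop, mtGC>* oq = ...;
//   oq->push(task);                          // always returns true
//
//   oop t;
//   while (oq->pop_overflow(t) || oq->pop_local(t)) {
//     ...                                    // process t
//   }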
class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
  virtual size_t tasks() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
  size_t tasks();

  uint size() const { return _n; }
};

template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

template<class T, MEMFLAGS F>
size_t GenericTaskQueueSet<T, F>::tasks() {
  size_t n = 0;
  for (uint j = 0; j < _n; j++) {
    n += _queues[j]->size();
  }
  return n;
}

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
protected:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  char _pad_before[DEFAULT_CACHE_LINE_SIZE];
  int _offered_termination;
  char _pad_after[DEFAULT_CACHE_LINE_SIZE];

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is a
  // set of work queues of other threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If it returns "true", all threads are terminated.  If it returns
  // "false", available work has been observed in one of the task queues,
  // so the global task is not complete.
  virtual bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(int n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins() { return _total_spins; }
  static uint total_peeks() { return _total_peeks; }
  static void print_termination_counts();
#endif
};
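// Illustrative worker loop (not part of the original header): each worker
// drains its own queue, then tries to steal; when neither yields work, it
// offers termination and re-checks for work if termination is not reached.
// "worker_id", "my_queue", "qset", "terminator", "seed" and "task" are
// placeholder names; OopTaskQueue/OopTaskQueueSet are typedef'd below.
//
//   void do_work(uint worker_id, OopTaskQueue* my_queue,
//                OopTaskQueueSet* qset,
//                ParallelTaskTerminator* terminator, int* seed) {
//     oop task;
//     do {
//       while (my_queue->pop_local(task) ||
//              qset->steal(worker_id, seed, task)) {
//         ...                         // process task; may push more work
//       }
//       // No local or stealable work observed; offer to terminate.
//     } while (!terminator->offer_termination());
//   }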
template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
  uint localBot = _bottom;
  assert(localBot < N, "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void) const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top();    // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.

    // The barrier is required to prevent reordering the two reads of _age:
    // one is the _age.get() below, and the other is _age.top() above the if-stmt.
    // The algorithm may fail if _age.get() reads an older value than _age.top().
    OrderAccess::loadload();
    return pop_local_slow(localBot, _age.get());
  }
}

typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void* _holder;        // either oop* or narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask()            { _holder = NULL; }
  operator oop*()       { return (oop*)_holder; }
  operator narrowOop*() {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
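// Illustrative sketch (not part of the original header): a consumer checks
// is_narrow() to decide which pointer type to extract before processing.
// "queue", "ref", "process_oop" and "process_narrow" are placeholder names.
//
//   StarTask ref;
//   if (queue->pop_local(ref)) {
//     if (ref.is_narrow()) {
//       narrowOop* p = ref;      // via operator narrowOop*()
//       process_narrow(p);
//     } else {
//       oop* p = ref;            // via operator oop*()
//       process_oop(p);
//     }
//   }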
class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _index = t._index;
    return *this;
  }

  inline oop obj() const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtClass>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtClass> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtInternal>          RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtClass>  RegionTaskQueueSet;


#endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP