1 /*
  2  * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "jvm_io.h"
 27 #include "logging/log.hpp"
 28 #include "logging/logStream.hpp"
 29 #include "memory/resourceArea.hpp"
 30 #include "runtime/atomic.hpp"
 31 #include "runtime/handshake.hpp"
 32 #include "runtime/interfaceSupport.inline.hpp"
 33 #include "runtime/os.hpp"
 34 #include "runtime/osThread.hpp"
 35 #include "runtime/stackWatermarkSet.hpp"
 36 #include "runtime/task.hpp"
 37 #include "runtime/thread.hpp"
 38 #include "runtime/threadSMR.hpp"
 39 #include "runtime/vmThread.hpp"
 40 #include "utilities/formatBuffer.hpp"
 41 #include "utilities/filterQueue.inline.hpp"
 42 #include "utilities/globalDefinitions.hpp"
 43 #include "utilities/preserveException.hpp"
 44 
// A handshake operation wraps a HandshakeClosure and tracks how many targeted
// threads still need to execute it. A single instance is shared by all targets
// when handshaking all threads at once.
class HandshakeOperation : public CHeapObj<mtThread> {
  friend class HandshakeState;
 protected:
  HandshakeClosure*   _handshake_cl;
  // Keeps track of emitted and completed handshake operations.
  // Once it reaches zero all handshake operations have been performed.
  int32_t             _pending_threads;
  JavaThread*         _target;     // Targeted thread, or NULL when targeting all threads.
  Thread*             _requester;  // Requesting thread; NULL for asynchronous operations.

  // Must use AsyncHandshakeOperation when using AsyncHandshakeClosure.
  HandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, Thread* requester) :
    _handshake_cl(cl),
    _pending_threads(1),
    _target(target),
    _requester(requester) {}

 public:
  HandshakeOperation(HandshakeClosure* cl, JavaThread* target, Thread* requester) :
    _handshake_cl(cl),
    _pending_threads(1),
    _target(target),
    _requester(requester) {}
  virtual ~HandshakeOperation() {}
  // Start stack watermark processing for the stacks the closure may touch.
  void prepare(JavaThread* current_target, Thread* executing_thread);
  // Run the closure for 'thread' and decrement _pending_threads.
  void do_handshake(JavaThread* thread);
  // True once every targeted thread has executed the operation.
  bool is_completed() {
    int32_t val = Atomic::load(&_pending_threads);
    assert(val >= 0, "_pending_threads=%d cannot be negative", val);
    return val == 0;
  }
  void add_target_count(int count) { Atomic::add(&_pending_threads, count); }
  int32_t pending_threads()        { return Atomic::load(&_pending_threads); }
  const char* name()               { return _handshake_cl->name(); }
  bool is_async()                  { return _handshake_cl->is_async(); }
  bool is_suspend()                { return _handshake_cl->is_suspend(); }
};
 82 
// Handshake operation with an asynchronous closure: the requester does not
// wait for completion, and the operation (plus its closure) is heap-allocated
// and deleted by whichever thread finally executes it.
class AsyncHandshakeOperation : public HandshakeOperation {
 private:
  jlong _start_time_ns;  // Request time, used for completion-time logging.
 public:
  AsyncHandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, jlong start_ns)
    : HandshakeOperation(cl, target, NULL), _start_time_ns(start_ns) {}
  virtual ~AsyncHandshakeOperation() { delete _handshake_cl; }
  jlong start_time() const           { return _start_time_ns; }
};
 92 
 93 // Performing handshakes requires a custom yielding strategy because without it
 94 // there is a clear performance regression vs plain spinning. We keep track of
 95 // when we last saw progress by looking at why each targeted thread has not yet
 96 // completed its handshake. After spinning for a while with no progress we will
 97 // yield, but as long as there is progress, we keep spinning. Thus we avoid
 98 // yielding when there is potential work to be done or the handshake is close
 99 // to being finished.
100 class HandshakeSpinYield : public StackObj {
101  private:
102   jlong _start_time_ns;
103   jlong _last_spin_start_ns;
104   jlong _spin_time_ns;
105 
106   int _result_count[2][HandshakeState::_number_states];
107   int _prev_result_pos;
108 
109   int current_result_pos() { return (_prev_result_pos + 1) & 0x1; }
110 
111   void wait_raw(jlong now) {
112     // We start with fine-grained nanosleeping until a millisecond has
113     // passed, at which point we resort to plain naked_short_sleep.
114     if (now - _start_time_ns < NANOSECS_PER_MILLISEC) {
115       os::naked_short_nanosleep(10 * (NANOUNITS / MICROUNITS));
116     } else {
117       os::naked_short_sleep(1);
118     }
119   }
120 
121   void wait_blocked(JavaThread* self, jlong now) {
122     ThreadBlockInVM tbivm(self);
123     wait_raw(now);
124   }
125 
126   bool state_changed() {
127     for (int i = 0; i < HandshakeState::_number_states; i++) {
128       if (_result_count[0][i] != _result_count[1][i]) {
129         return true;
130       }
131     }
132     return false;
133   }
134 
135   void reset_state() {
136     _prev_result_pos++;
137     for (int i = 0; i < HandshakeState::_number_states; i++) {
138       _result_count[current_result_pos()][i] = 0;
139     }
140   }
141 
142  public:
143   HandshakeSpinYield(jlong start_time) :
144     _start_time_ns(start_time), _last_spin_start_ns(start_time),
145     _spin_time_ns(0), _result_count(), _prev_result_pos(0) {
146 
147     const jlong max_spin_time_ns = 100 /* us */ * (NANOUNITS / MICROUNITS);
148     int free_cpus = os::active_processor_count() - 1;
149     _spin_time_ns = (5 /* us */ * (NANOUNITS / MICROUNITS)) * free_cpus; // zero on UP
150     _spin_time_ns = _spin_time_ns > max_spin_time_ns ? max_spin_time_ns : _spin_time_ns;
151   }
152 
153   void add_result(HandshakeState::ProcessResult pr) {
154     _result_count[current_result_pos()][pr]++;
155   }
156 
157   void process() {
158     jlong now = os::javaTimeNanos();
159     if (state_changed()) {
160       reset_state();
161       // We spin for x amount of time since last state change.
162       _last_spin_start_ns = now;
163       return;
164     }
165     jlong wait_target = _last_spin_start_ns + _spin_time_ns;
166     if (wait_target < now) {
167       // On UP this is always true.
168       Thread* self = Thread::current();
169       if (self->is_Java_thread()) {
170         wait_blocked(JavaThread::cast(self), now);
171       } else {
172         wait_raw(now);
173       }
174       _last_spin_start_ns = os::javaTimeNanos();
175     }
176     reset_state();
177   }
178 };
179 
180 static void handle_timeout(HandshakeOperation* op, JavaThread* target) {
181   JavaThreadIteratorWithHandle jtiwh;
182 
183   log_error(handshake)("Handshake timeout: %s(" INTPTR_FORMAT "), pending threads: " INT32_FORMAT,
184                        op->name(), p2i(op), op->pending_threads());
185 
186   if (target == NULL) {
187     for ( ; JavaThread* thr = jtiwh.next(); ) {
188       if (thr->handshake_state()->operation_pending(op)) {
189         log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(thr), p2i(op));
190         // Remember the last one found for more diagnostics below.
191         target = thr;
192       }
193     }
194   } else {
195     log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(target), p2i(op));
196   }
197 
198   if (target != NULL) {
199     if (os::signal_thread(target, SIGILL, "cannot be handshaked")) {
200       // Give target a chance to report the error and terminate the VM.
201       os::naked_sleep(3000);
202     }
203   } else {
204     log_error(handshake)("No thread with an unfinished handshake op(" INTPTR_FORMAT ") found.", p2i(op));
205   }
206   fatal("Handshake timeout");
207 }
208 
209 static void check_handshake_timeout(jlong start_time, HandshakeOperation* op, JavaThread* target = NULL) {
210   // Check if handshake operation has timed out
211   jlong timeout_ns = millis_to_nanos(HandshakeTimeout);
212   if (timeout_ns > 0) {
213     if (os::javaTimeNanos() >= (start_time + timeout_ns)) {
214       handle_timeout(op, target);
215     }
216   }
217 }
218 
219 static void log_handshake_info(jlong start_time_ns, const char* name, int targets, int emitted_handshakes_executed, const char* extra = NULL) {
220   if (log_is_enabled(Info, handshake)) {
221     jlong completion_time = os::javaTimeNanos() - start_time_ns;
222     log_info(handshake)("Handshake \"%s\", Targeted threads: %d, Executed by requesting thread: %d, Total completion time: " JLONG_FORMAT " ns%s%s",
223                         name, targets,
224                         emitted_handshakes_executed,
225                         completion_time,
226                         extra != NULL ? ", " : "",
227                         extra != NULL ? extra : "");
228   }
229 }
230 
// VM operation that handshakes every JavaThread. Runs outside a safepoint:
// the operation is queued on all threads, then the VMThread helps process
// threads observed in a safe (e.g. blocked) state until all have executed it.
class VM_HandshakeAllThreads: public VM_Operation {
  HandshakeOperation* const _op;
 public:
  VM_HandshakeAllThreads(HandshakeOperation* op) : _op(op) {}

  // Handshakes are processed while threads keep running; no safepoint needed.
  bool evaluate_at_safepoint() const { return false; }

  void doit() {
    jlong start_time_ns = os::javaTimeNanos();

    JavaThreadIteratorWithHandle jtiwh;
    // Queue the operation on every live JavaThread (arming their polls).
    int number_of_threads_issued = 0;
    for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
      thr->handshake_state()->add_operation(_op);
      number_of_threads_issued++;
    }

    if (number_of_threads_issued < 1) {
      log_handshake_info(start_time_ns, _op->name(), 0, 0, "no threads alive");
      return;
    }
    // _op was created with a count == 1 so don't double count.
    _op->add_target_count(number_of_threads_issued - 1);

    log_trace(handshake)("Threads signaled, begin processing blocked threads by VMThread");
    HandshakeSpinYield hsy(start_time_ns);
    // Keeps count of how many of its own emitted handshakes
    // this thread executes.
    int emitted_handshakes_executed = 0;
    do {
      // Check if handshake operation has timed out
      check_handshake_timeout(start_time_ns, _op);

      // Have VM thread perform the handshake operation for blocked threads.
      // Observing a blocked state may of course be transient but the processing is guarded
      // by mutexes and we optimistically begin by working on the blocked threads
      jtiwh.rewind();
      for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
        // A new thread on the ThreadsList will not have an operation,
        // hence it is skipped in handshake_try_process.
        HandshakeState::ProcessResult pr = thr->handshake_state()->try_process(_op);
        hsy.add_result(pr);
        if (pr == HandshakeState::_succeeded) {
          emitted_handshakes_executed++;
        }
      }
      hsy.process();
    } while (!_op->is_completed());

    // This pairs up with the release store in do_handshake(). It prevents future
    // loads from floating above the load of _pending_threads in is_completed()
    // and thus prevents reading stale data modified in the handshake closure
    // by the Handshakee.
    OrderAccess::acquire();

    log_handshake_info(start_time_ns, _op->name(), number_of_threads_issued, emitted_handshakes_executed);
  }

  VMOp_Type type() const { return VMOp_HandshakeAllThreads; }
};
291 
// Make sure stack watermark (lazy stack processing) state permits running the
// closure: both the target's stack and any Handles owned by the requester may
// be touched while the operation executes.
void HandshakeOperation::prepare(JavaThread* current_target, Thread* executing_thread) {
  if (current_target->is_terminated()) {
    // Will never execute any handshakes on this thread.
    return;
  }
  if (current_target != executing_thread) {
    // Only when the target is not executing the handshake itself.
    StackWatermarkSet::start_processing(current_target, StackWatermarkKind::gc);
  }
  if (_requester != NULL && _requester != executing_thread && _requester->is_Java_thread()) {
    // The handshake closure may contain oop Handles from the _requester.
    // We must make sure we can use them.
    StackWatermarkSet::start_processing(JavaThread::cast(_requester), StackWatermarkKind::gc);
  }
}
307 
// Execute the closure on 'thread' (unless it has terminated), then decrement
// _pending_threads to signal completion to the requester.
void HandshakeOperation::do_handshake(JavaThread* thread) {
  jlong start_time_ns = 0;
  if (log_is_enabled(Debug, handshake, task)) {
    start_time_ns = os::javaTimeNanos();
  }

  // Only actually execute the operation for non terminated threads.
  if (!thread->is_terminated()) {
    //NoSafepointVerifier nsv;
    _handshake_cl->do_thread(thread);
  }

  if (start_time_ns != 0) {
    jlong completion_time = os::javaTimeNanos() - start_time_ns;
    log_debug(handshake, task)("Operation: %s for thread " PTR_FORMAT ", is_vm_thread: %s, completed in " JLONG_FORMAT " ns",
                               name(), p2i(thread), BOOL_TO_STR(Thread::current()->is_VM_thread()), completion_time);
  }

  // Inform VMThread/Handshaker that we have completed the operation.
  // When this is executed by the Handshakee we need a release store
  // here to make sure memory operations executed in the handshake
  // closure are visible to the VMThread/Handshaker after it reads
  // that the operation has completed.
  // NOTE(review): relies on Atomic::dec's default (conservative) ordering to
  // supply that release — confirm against runtime/atomic.hpp.
  Atomic::dec(&_pending_threads);
  // Trailing fence, used to make sure removal of the operation strictly
  // happened after we completed the operation.

  // It is no longer safe to refer to 'this' as the VMThread/Handshaker may have destroyed this operation
}
337 
338 void Handshake::execute(HandshakeClosure* hs_cl) {
339   HandshakeOperation cto(hs_cl, NULL, Thread::current());
340   VM_HandshakeAllThreads handshake(&cto);
341   VMThread::execute(&handshake);
342 }
343 
// Synchronous single-target handshake without an explicit ThreadsListHandle.
void Handshake::execute(HandshakeClosure* hs_cl, JavaThread* target) {
  // tlh == nullptr means we rely on a ThreadsListHandle somewhere
  // in the caller's context (and we sanity check for that).
  Handshake::execute(hs_cl, nullptr, target);
}
349 
// Synchronous single-target handshake: queue the operation on 'target' and
// spin/help until it has been executed — by the target itself, by us, or by
// the VMThread. Returns early if 'target' is not alive in the supplied tlh.
void Handshake::execute(HandshakeClosure* hs_cl, ThreadsListHandle* tlh, JavaThread* target) {
  JavaThread* self = JavaThread::current();
  HandshakeOperation op(hs_cl, target, Thread::current());

  jlong start_time_ns = os::javaTimeNanos();

  guarantee(target != nullptr, "must be");
  if (tlh == nullptr) {
    // No explicit list: the caller's context must already protect 'target'.
    guarantee(Thread::is_JavaThread_protected_by_TLH(target),
              "missing ThreadsListHandle in calling context.");
    target->handshake_state()->add_operation(&op);
  } else if (tlh->includes(target)) {
    target->handshake_state()->add_operation(&op);
  } else {
    // Target is not on the list, i.e. it has exited; nothing to handshake.
    char buf[128];
    jio_snprintf(buf, sizeof(buf),  "(thread= " INTPTR_FORMAT " dead)", p2i(target));
    log_handshake_info(start_time_ns, op.name(), 0, 0, buf);
    return;
  }

  // Keeps count of how many of its own emitted handshakes
  // this thread executes.
  int emitted_handshakes_executed = 0;
  HandshakeSpinYield hsy(start_time_ns);
  while (!op.is_completed()) {
    HandshakeState::ProcessResult pr = target->handshake_state()->try_process(&op);
    if (pr == HandshakeState::_succeeded) {
      emitted_handshakes_executed++;
    }
    if (op.is_completed()) {
      break;
    }

    // Check if handshake operation has timed out
    check_handshake_timeout(start_time_ns, &op, target);

    hsy.add_result(pr);
    // Check for pending handshakes to avoid possible deadlocks where our
    // target is trying to handshake us.
    if (SafepointMechanism::should_process(self)) {
      // Will not suspend here.
      ThreadBlockInVM tbivm(self);
    }
    hsy.process();
  }

  // This pairs up with the release store in do_handshake(). It prevents future
  // loads from floating above the load of _pending_threads in is_completed()
  // and thus prevents reading stale data modified in the handshake closure
  // by the Handshakee.
  OrderAccess::acquire();

  log_handshake_info(start_time_ns, op.name(), 1, emitted_handshakes_executed);
}
404 
405 void Handshake::execute(AsyncHandshakeClosure* hs_cl, JavaThread* target) {
406   jlong start_time_ns = os::javaTimeNanos();
407   AsyncHandshakeOperation* op = new AsyncHandshakeOperation(hs_cl, target, start_time_ns);
408 
409   guarantee(target != nullptr, "must be");
410 
411   Thread* current = Thread::current();
412   if (current != target) {
413     // Another thread is handling the request and it must be protecting
414     // the target.
415     guarantee(Thread::is_JavaThread_protected_by_TLH(target),
416               "missing ThreadsListHandle in calling context.");
417   }
418   // Implied else:
419   // The target is handling the request itself so it can't be dead.
420 
421   target->handshake_state()->add_operation(op);
422 }
423 
// Per-JavaThread handshake state; 'target' is the thread this state belongs
// to (the handshakee). Starts with an empty queue, not suspended/blocked.
HandshakeState::HandshakeState(JavaThread* target) :
  _handshakee(target),
  _queue(),
  _lock(Monitor::nosafepoint, "HandshakeState_lock"),
  _active_handshaker(),
  _caller(nullptr),
  _suspended(false),
  _async_suspend_handshake(false)
{
}
434 
// Queue 'op' for the handshakee and arm its poll.
void HandshakeState::add_operation(HandshakeOperation* op) {
  // Adds are done lock free and so is arming.
  // Push first, then arm with release semantics: a handshakee that observes
  // the armed poll is guaranteed to also see the queued operation.
  _queue.push(op);
  SafepointMechanism::arm_local_poll_release(_handshakee);
}
440 
441 bool HandshakeState::operation_pending(HandshakeOperation* op) {
442   MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
443   MatchOp mo(op);
444   return _queue.contains(mo);
445 }
446 
447 static bool no_suspend_filter(HandshakeOperation* op) {
448   return !op->is_suspend();
449 }
450 
451 HandshakeOperation* HandshakeState::get_op_for_self(bool allow_suspend) {
452   assert(_handshakee == Thread::current(), "Must be called by self");
453   assert(_lock.owned_by_self(), "Lock must be held");
454   if (allow_suspend) {
455     return _queue.peek();
456   } else {
457     return _queue.peek(no_suspend_filter);
458   }
459 }
460 
461 bool HandshakeState::non_self_queue_filter(HandshakeOperation* op) {
462   if (op->_handshake_cl->can_be_processed_by(Thread::current())) {
463     return !op->is_async();
464   }
465   return false;
466 }
467 
// Returns true if the queue holds an operation that a thread other than the
// handshakee could execute right now. Requires _lock to be held.
bool HandshakeState::have_non_self_executable_operation() {
  assert(_handshakee != Thread::current(), "Must not be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  return _queue.contains(non_self_queue_filter);
}
473 
// Returns true if any queued operation other than a suspension request exists.
bool HandshakeState::has_a_non_suspend_operation() {
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  return _queue.contains(no_suspend_filter);
}
478 
// Peek the next operation another thread may execute for the handshakee.
// Requires _lock to be held.
HandshakeOperation* HandshakeState::get_op() {
  assert(_handshakee != Thread::current(), "Must not be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  return _queue.peek(non_self_queue_filter);
};
484 
485 void HandshakeState::remove_op(HandshakeOperation* op) {
486   assert(_lock.owned_by_self(), "Lock must be held");
487   MatchOp mo(op);
488   HandshakeOperation* ret = _queue.pop(mo);
489   assert(ret == op, "Popped op must match requested op");
490 };
491 
// Executed by the handshakee when it honors its armed poll: process queued
// operations one at a time. Returns true if a safepoint check is required
// afterwards (only after an asynchronous operation, which may have blocked).
bool HandshakeState::process_by_self(bool allow_suspend) {
  assert(Thread::current() == _handshakee, "should call from _handshakee");
  assert(!_handshakee->is_terminated(), "should not be a terminated thread");
  assert(_handshakee->thread_state() != _thread_blocked, "should not be in a blocked state");
  assert(_handshakee->thread_state() != _thread_in_native, "should not be in native");

  ThreadInVMForHandshake tivm(_handshakee);
  // Handshakes cannot safely safepoint.
  // The exception to this rule is the asynchronous suspension handshake.
  // It by-passes the NSV by manually doing the transition.
  //NoSafepointVerifier nsv;

  while (has_operation()) {
    // Hold the lock so no other thread can process the same op concurrently.
    MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);

    HandshakeOperation* op = get_op_for_self(allow_suspend);
    if (op != NULL) {
      assert(op->_target == NULL || op->_target == Thread::current(), "Wrong thread");
      bool async = op->is_async();
      log_trace(handshake)("Proc handshake %s " INTPTR_FORMAT " on " INTPTR_FORMAT " by self",
                           async ? "asynchronous" : "synchronous", p2i(op), p2i(_handshakee));
      op->prepare(_handshakee, _handshakee);
      if (!async) {
        HandleMark hm(_handshakee);
        PreserveExceptionMark pem(_handshakee);
        op->do_handshake(_handshakee); // acquire, op removed after
        remove_op(op);
      } else {
        // An asynchronous handshake may put the JavaThread in blocked state (safepoint safe).
        // The destructor ~PreserveExceptionMark touches the exception oop so it must not be executed,
        // since a safepoint may be in-progress when returning from the async handshake.
        op->do_handshake(_handshakee); // acquire, op removed after
        remove_op(op);
        log_handshake_info(((AsyncHandshakeOperation*)op)->start_time(), op->name(), 1, 0, "asynchronous");
        delete op;
        return true; // Must check for safepoints
      }
    } else {
      // Nothing executable by self right now (e.g. only suspension requests
      // while allow_suspend is false).
      return false;
    }
  }
  return false;
}
535 
// Definitive (non-racy) safety check, valid only after claim_handshake().
bool HandshakeState::can_process_handshake() {
  // handshake_safe may only be called with polls armed.
  // Handshaker controls this by first claiming the handshake via claim_handshake().
  return SafepointSynchronize::handshake_safe(_handshakee);
}
541 
542 bool HandshakeState::possibly_can_process_handshake() {
543   // Note that this method is allowed to produce false positives.
544   if (_handshakee->is_terminated()) {
545     return true;
546   }
547   switch (_handshakee->thread_state()) {
548   case _thread_in_native:
549     // native threads are safe if they have no java stack or have walkable stack
550     return !_handshakee->has_last_Java_frame() || _handshakee->frame_anchor()->walkable();
551 
552   case _thread_blocked:
553     return true;
554 
555   default:
556     return false;
557   }
558 }
559 
// Try to acquire the per-thread handshake lock when there is an operation we
// could execute on the handshakee's behalf. Returns with _lock held on
// success; the caller is responsible for unlocking.
bool HandshakeState::claim_handshake() {
  if (!_lock.try_lock()) {
    return false;
  }
  // Operations are added lock free and then the poll is armed.
  // If all handshake operations for the handshakee are finished and someone
  // just adds an operation we may see it here. But if the handshakee is not
  // armed yet it is not safe to proceed.
  if (have_non_self_executable_operation()) {
    OrderAccess::loadload(); // Matches the implicit storestore in add_operation()
    if (SafepointMechanism::local_poll_armed(_handshakee)) {
      return true;
    }
  }
  _lock.unlock();
  return false;
}
577 
// Attempt to execute one queued operation on behalf of the handshakee.
// Reports what happened: nothing queued, thread observed unsafe, lock
// contention, or whether the executed op was the one we waited for
// (match_op -> _succeeded) or some other queued op (_processed).
HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* match_op) {
  if (!has_operation()) {
    // JT has already cleared its handshake
    return HandshakeState::_no_operation;
  }

  if (!possibly_can_process_handshake()) {
    // JT is observed in an unsafe state, it must notice the handshake itself
    return HandshakeState::_not_safe;
  }

  // Claim the mutex if there still an operation to be executed.
  if (!claim_handshake()) {
    return HandshakeState::_claim_failed;
  }

  // If we own the mutex at this point and while owning the mutex we
  // can observe a safe state the thread cannot possibly continue without
  // getting caught by the mutex.
  if (!can_process_handshake()) {
    _lock.unlock();
    return HandshakeState::_not_safe;
  }

  Thread* current_thread = Thread::current();

  HandshakeOperation* op = get_op();

  assert(op != NULL, "Must have an op");
  assert(SafepointMechanism::local_poll_armed(_handshakee), "Must be");
  assert(op->_target == NULL || _handshakee == op->_target, "Wrong thread");

  log_trace(handshake)("Processing handshake " INTPTR_FORMAT " by %s(%s)", p2i(op),
                       op == match_op ? "handshaker" : "cooperative",
                       current_thread->is_VM_thread() ? "VM Thread" : "JavaThread");

  op->prepare(_handshakee, current_thread);

  set_active_handshaker(current_thread);
  op->do_handshake(_handshakee); // acquire, op removed after
  set_active_handshaker(NULL);
  remove_op(op);

  _lock.unlock();

  log_trace(handshake)("%s(" INTPTR_FORMAT ") executed an op for JavaThread: " INTPTR_FORMAT " %s target op: " INTPTR_FORMAT,
                       current_thread->is_VM_thread() ? "VM Thread" : "JavaThread",
                       p2i(current_thread), p2i(_handshakee),
                       op == match_op ? "including" : "excluding", p2i(match_op));

  return op == match_op ? HandshakeState::_succeeded : HandshakeState::_processed;
}
630 
// Park the handshakee on its handshake lock while it remains suspended or
// blocked; resume()/continue_resume() notify the lock to wake it. The caller
// must already have transitioned to _thread_blocked and hold _lock.
void HandshakeState::do_self_suspend() {
  assert(Thread::current() == _handshakee, "should call from _handshakee");
  assert(_lock.owned_by_self(), "Lock must be held");
  assert(!_handshakee->has_last_Java_frame() || _handshakee->frame_anchor()->walkable(), "should have walkable stack");
  assert(_handshakee->thread_state() == _thread_blocked, "Caller should have transitioned to _thread_blocked");

  while (is_suspended_or_blocked()) {
    JVMTI_ONLY(assert(!_handshakee->is_in_VTMT(), "no suspend allowed in VTMT transition");)
    log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended", p2i(_handshakee));
    _lock.wait_without_safepoint_check();
  }
  log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " resumed", p2i(_handshakee));
}
644 
// This is the closure that prevents a suspended JavaThread from
// escaping the suspend request. It runs as an asynchronous handshake on the
// target itself, which parks in do_self_suspend() until resumed.
class ThreadSelfSuspensionHandshake : public AsyncHandshakeClosure {
 public:
  ThreadSelfSuspensionHandshake() : AsyncHandshakeClosure("ThreadSelfSuspensionHandshake") {}
  void do_thread(Thread* thr) {
    JavaThread* current = JavaThread::cast(thr);
    assert(current == Thread::current(), "Must be self executed.");
    // Save the current state, park as _thread_blocked (safepoint safe) until
    // resumed, then restore the saved state.
    JavaThreadState jts = current->thread_state();

    current->set_thread_state(_thread_blocked);
    current->handshake_state()->do_self_suspend();
    current->set_thread_state(jts);
    current->handshake_state()->set_async_suspend_handshake(false);
  }
  virtual bool is_suspend() { return true; }
};
662 
// Mark the handshakee as suspended (caller == nullptr) or blocked on
// 'caller', and if needed arm an asynchronous self-suspension handshake so
// the target parks itself at its next poll. Executed inside a handshake on
// the target. Returns false if the target is exiting or already in the
// requested state.
bool HandshakeState::suspend_with_handshake(JavaThread* caller) {
  // This tested for _handshakee->threadObj() != NULL as well, but the test doesn't work
  // for that.  TODO: can you suspend a thread during initialization ?
  if (_handshakee->is_exiting()) {
    log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " exiting", p2i(_handshakee));
    return false;
  }
  bool should_block = caller != nullptr;
  bool should_suspend = caller == nullptr;

  if (has_async_suspend_handshake()) {
    // The self-suspension handshake is already queued or parked.
    if ((is_suspended() && should_suspend) || (is_blocked() && should_block)) {
      log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " already suspended or blocked", p2i(_handshakee));
      return false;
    } else if (should_suspend) {
      // Target is going to wake up and leave suspension.
      // Let's just stop the thread from doing that.
      log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " re-suspended", p2i(_handshakee));
      set_suspended(true);
      return true;
    } else {
      assert(should_block, "should block");
      // Target is going to wake up and leave blocking.
      // Let's just stop the thread from doing that.
      log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " re-blocked", p2i(_handshakee));
      set_caller_thread(caller);
      return true;
    }
  }

  // Thread is safe, so it must execute the request, thus we can count it as suspended
  // or blocked from this point.
  if (should_suspend) {
    // no suspend request
    assert(!is_suspended(), "cannot be suspended without a request");
    set_suspended(true);
  } else {
    assert(!is_blocked(), "cannot be blocked without a request");
    set_caller_thread(caller);
  }

  // The async handshake is deleted by the target when it executes it.
  set_async_suspend_handshake(true);
  log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended, arming ThreadSuspension", p2i(_handshakee));
  ThreadSelfSuspensionHandshake* ts = new ThreadSelfSuspensionHandshake();
  Handshake::execute(ts, _handshakee);
  return true;
}
710 
// This is the closure that synchronously honors the suspend request.
class SuspendThreadHandshake : public HandshakeClosure {
  JavaThread* _caller;      // Blocking caller, or nullptr for a plain suspend.
  bool        _did_suspend; // Outcome, reported back via did_suspend().
public:
  SuspendThreadHandshake(JavaThread* caller) : HandshakeClosure("SuspendThread"), _caller(caller), _did_suspend(false) {}
  void do_thread(Thread* thr) {
    JavaThread* target = JavaThread::cast(thr);
    _did_suspend = target->handshake_state()->suspend_with_handshake(_caller);
  }
  bool did_suspend() { return _did_suspend; }
};
723 
// Suspend the handshakee. A thread suspending itself parks directly; another
// thread uses a synchronous SuspendThreadHandshake so the target arms its own
// self-suspension. Returns whether a suspension was actually installed.
bool HandshakeState::suspend() {
  JVMTI_ONLY(assert(!_handshakee->is_in_VTMT(), "no suspend allowed in VTMT transition");)
  JavaThread* self = JavaThread::current();
  if (_handshakee == self) {
    // If target is the current thread we can bypass the handshake machinery
    // and just suspend directly
    ThreadBlockInVM tbivm(self);
    MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
    set_suspended(true);
    do_self_suspend();
    return true;
  } else {
    SuspendThreadHandshake st(nullptr);
    Handshake::execute(&st, _handshakee);
    return st.did_suspend();
  }
}
741 
// Clear a pending suspension and wake the parked handshakee.
// Returns false if the thread was not suspended.
bool HandshakeState::resume() {
  // Racy fast-path check without the lock; a false here is stable enough.
  if (!is_suspended()) {
    return false;
  }
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  // Re-check under the lock in case another resume won the race.
  if (!is_suspended()) {
    assert(!_handshakee->is_suspended(), "cannot be suspended without a suspend request");
    return false;
  }
  // Resume the thread.
  set_suspended(false);
  _lock.notify();
  return true;
}
756 
757 // One thread blocks execution of another thread until it resumes it.  This is similar to
758 // suspend, and much of the code is shared but it's a separate state from being suspended.
759 // The commonality is that the thread is self-suspended and that thread waits for both
760 // conditions to clear.
761 bool HandshakeState::block_suspend(JavaThread* caller) {
762   assert(caller == JavaThread::current(), "caller must be current thread");
763 
764   SuspendThreadHandshake st(caller);
765   Handshake::execute(&st, _handshakee);
766   bool suspended = st.did_suspend();
767   return suspended;
768 }
769 
// Resume a thread blocked via block_suspend(); only the caller that blocked
// it may continue it. Returns false if the thread is not blocked on 'caller'.
bool HandshakeState::continue_resume(JavaThread* caller) {
  assert(caller == JavaThread::current(), "caller must be current thread");

  // If caller is non-null only resume blocked thread if it's the caller
  // (racy fast-path check; verified by the assert under the lock below).
  if (!is_blocked() || caller_thread() != caller) {
    return false;
  }
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  assert(is_blocked() && caller_thread() == caller,
         "this is the only thread that can continue this thread");

  // Resume the thread.
  set_caller_thread(nullptr); // !is_blocked()
  _lock.notify();
  return true;
}
--- EOF ---