/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/task.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
 40 #include "utilities/formatBuffer.hpp"
 41 #include "utilities/filterQueue.inline.hpp"
 42 #include "utilities/globalDefinitions.hpp"
 43 #include "utilities/preserveException.hpp"
 44 
class HandshakeOperation : public CHeapObj<mtThread> {
  friend class HandshakeState;
 protected:
  HandshakeClosure*   _handshake_cl;
  // Keeps track of emitted and completed handshake operations.
  // Once it reaches zero all handshake operations have been performed.
  int32_t             _pending_threads;
  JavaThread*         _target;
  Thread*             _requester;

  // Must use AsyncHandshakeOperation when using AsyncHandshakeClosure.
  HandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, Thread* requester) :
    _handshake_cl(cl),
    _pending_threads(1),
    _target(target),
    _requester(requester) {}

 public:
  HandshakeOperation(HandshakeClosure* cl, JavaThread* target, Thread* requester) :
    _handshake_cl(cl),
    _pending_threads(1),
    _target(target),
    _requester(requester) {}
  virtual ~HandshakeOperation() {}
  void prepare(JavaThread* current_target, Thread* executing_thread);
  void do_handshake(JavaThread* thread);
  bool is_completed() {
    int32_t val = Atomic::load(&_pending_threads);
    assert(val >= 0, "_pending_threads=%d cannot be negative", val);
    return val == 0;
  }
  void add_target_count(int count) { Atomic::add(&_pending_threads, count); }
  int32_t pending_threads()        { return Atomic::load(&_pending_threads); }
  const char* name()               { return _handshake_cl->name(); }
  bool is_async()                  { return _handshake_cl->is_async(); }
  bool is_suspend()                { return _handshake_cl->is_suspend(); }
};

class AsyncHandshakeOperation : public HandshakeOperation {
 private:
  jlong _start_time_ns;
 public:
  AsyncHandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, jlong start_ns)
    : HandshakeOperation(cl, target, NULL), _start_time_ns(start_ns) {}
  virtual ~AsyncHandshakeOperation() { delete _handshake_cl; }
  jlong start_time() const           { return _start_time_ns; }
};

// Performing handshakes requires a custom yielding strategy because without it
// there is a clear performance regression vs plain spinning. We keep track of
// when we last saw progress by looking at why each targeted thread has not yet
// completed its handshake. After spinning for a while with no progress we will
// yield, but as long as there is progress, we keep spinning. Thus we avoid
// yielding when there is potential work to be done or the handshake is close
// to being finished.
class HandshakeSpinYield : public StackObj {
 private:
  jlong _start_time_ns;
  jlong _last_spin_start_ns;
  jlong _spin_time_ns;

  int _result_count[2][HandshakeState::_number_states];
  int _prev_result_pos;

  int current_result_pos() { return (_prev_result_pos + 1) & 0x1; }

  void wait_raw(jlong now) {
    // We start with fine-grained nanosleeping until a millisecond has
    // passed, at which point we resort to plain naked_short_sleep.
    if (now - _start_time_ns < NANOSECS_PER_MILLISEC) {
      os::naked_short_nanosleep(10 * (NANOUNITS / MICROUNITS));
    } else {
      os::naked_short_sleep(1);
    }
  }

  void wait_blocked(JavaThread* self, jlong now) {
    ThreadBlockInVM tbivm(self);
    wait_raw(now);
  }

  bool state_changed() {
    for (int i = 0; i < HandshakeState::_number_states; i++) {
      if (_result_count[0][i] != _result_count[1][i]) {
        return true;
      }
    }
    return false;
  }

  void reset_state() {
    _prev_result_pos++;
    for (int i = 0; i < HandshakeState::_number_states; i++) {
      _result_count[current_result_pos()][i] = 0;
    }
  }

 public:
  HandshakeSpinYield(jlong start_time) :
    _start_time_ns(start_time), _last_spin_start_ns(start_time),
    _spin_time_ns(0), _result_count(), _prev_result_pos(0) {

    const jlong max_spin_time_ns = 100 /* us */ * (NANOUNITS / MICROUNITS);
    int free_cpus = os::active_processor_count() - 1;
    _spin_time_ns = (5 /* us */ * (NANOUNITS / MICROUNITS)) * free_cpus; // zero on UP
    _spin_time_ns = _spin_time_ns > max_spin_time_ns ? max_spin_time_ns : _spin_time_ns;
  }

  void add_result(HandshakeState::ProcessResult pr) {
    _result_count[current_result_pos()][pr]++;
  }

  void process() {
    jlong now = os::javaTimeNanos();
    if (state_changed()) {
      reset_state();
      // We spin for x amount of time since last state change.
      _last_spin_start_ns = now;
      return;
    }
    jlong wait_target = _last_spin_start_ns + _spin_time_ns;
    if (wait_target < now) {
      // On UP this is always true.
      Thread* self = Thread::current();
      if (self->is_Java_thread()) {
        wait_blocked(JavaThread::cast(self), now);
      } else {
        wait_raw(now);
      }
      _last_spin_start_ns = os::javaTimeNanos();
    }
    reset_state();
  }
};

static void handle_timeout(HandshakeOperation* op, JavaThread* target) {
  JavaThreadIteratorWithHandle jtiwh;

  log_error(handshake)("Handshake timeout: %s(" INTPTR_FORMAT "), pending threads: " INT32_FORMAT,
                       op->name(), p2i(op), op->pending_threads());

  if (target == NULL) {
    for ( ; JavaThread* thr = jtiwh.next(); ) {
      if (thr->handshake_state()->operation_pending(op)) {
        log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(thr), p2i(op));
        // Remember the last one found for more diagnostics below.
        target = thr;
      }
    }
  } else {
    log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(target), p2i(op));
  }

  if (target != NULL) {
    if (os::signal_thread(target, SIGILL, "cannot be handshaked")) {
      // Give target a chance to report the error and terminate the VM.
      os::naked_sleep(3000);
    }
  } else {
    log_error(handshake)("No thread with an unfinished handshake op(" INTPTR_FORMAT ") found.", p2i(op));
  }
  fatal("Handshake timeout");
}

static void check_handshake_timeout(jlong start_time, HandshakeOperation* op, JavaThread* target = NULL) {
  // Check if handshake operation has timed out
  jlong timeout_ns = millis_to_nanos(HandshakeTimeout);
  if (timeout_ns > 0) {
    if (os::javaTimeNanos() >= (start_time + timeout_ns)) {
      handle_timeout(op, target);
    }
  }
}

static void log_handshake_info(jlong start_time_ns, const char* name, int targets, int emitted_handshakes_executed, const char* extra = NULL) {
  if (log_is_enabled(Info, handshake)) {
    jlong completion_time = os::javaTimeNanos() - start_time_ns;
    log_info(handshake)("Handshake \"%s\", Targeted threads: %d, Executed by requesting thread: %d, Total completion time: " JLONG_FORMAT " ns%s%s",
                        name, targets,
                        emitted_handshakes_executed,
                        completion_time,
                        extra != NULL ? ", " : "",
                        extra != NULL ? extra : "");
  }
}

class VM_HandshakeAllThreads: public VM_Operation {
  HandshakeOperation* const _op;
 public:
  VM_HandshakeAllThreads(HandshakeOperation* op) : _op(op) {}

  bool evaluate_at_safepoint() const { return false; }

  void doit() {
    jlong start_time_ns = os::javaTimeNanos();

    JavaThreadIteratorWithHandle jtiwh;
    int number_of_threads_issued = 0;
    for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
      thr->handshake_state()->add_operation(_op);
      number_of_threads_issued++;
    }

    if (number_of_threads_issued < 1) {
      log_handshake_info(start_time_ns, _op->name(), 0, 0, "no threads alive");
      return;
    }
    // _op was created with a count == 1 so don't double count.
    _op->add_target_count(number_of_threads_issued - 1);

    log_trace(handshake)("Threads signaled, begin processing blocked threads by VMThread");
    HandshakeSpinYield hsy(start_time_ns);
    // Keeps count of how many of its own emitted handshakes
    // this thread executes.
    int emitted_handshakes_executed = 0;
    do {
      // Check if handshake operation has timed out
      check_handshake_timeout(start_time_ns, _op);

      // Have VM thread perform the handshake operation for blocked threads.
      // Observing a blocked state may of course be transient but the processing is guarded
      // by mutexes and we optimistically begin by working on the blocked threads
      jtiwh.rewind();
      for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
        // A new thread on the ThreadsList will not have an operation,
        // hence it is skipped in handshake_try_process.
        HandshakeState::ProcessResult pr = thr->handshake_state()->try_process(_op);
        hsy.add_result(pr);
        if (pr == HandshakeState::_succeeded) {
          emitted_handshakes_executed++;
        }
      }
      hsy.process();
    } while (!_op->is_completed());

    // This pairs up with the release store in do_handshake(). It prevents future
    // loads from floating above the load of _pending_threads in is_completed()
    // and thus prevents reading stale data modified in the handshake closure
    // by the Handshakee.
    OrderAccess::acquire();

    log_handshake_info(start_time_ns, _op->name(), number_of_threads_issued, emitted_handshakes_executed);
  }

  VMOp_Type type() const { return VMOp_HandshakeAllThreads; }
};

void HandshakeOperation::prepare(JavaThread* current_target, Thread* executing_thread) {
  if (current_target->is_terminated()) {
    // Will never execute any handshakes on this thread.
    return;
  }
  if (current_target != executing_thread) {
    // Only when the target is not executing the handshake itself.
    StackWatermarkSet::start_processing(current_target, StackWatermarkKind::gc);
  }
  if (_requester != NULL && _requester != executing_thread && _requester->is_Java_thread()) {
    // The handshake closure may contain oop Handles from the _requester.
    // We must make sure we can use them.
    StackWatermarkSet::start_processing(JavaThread::cast(_requester), StackWatermarkKind::gc);
  }
}

void HandshakeOperation::do_handshake(JavaThread* thread) {
  jlong start_time_ns = 0;
  if (log_is_enabled(Debug, handshake, task)) {
    start_time_ns = os::javaTimeNanos();
  }

  // Only actually execute the operation for non-terminated threads.
  if (!thread->is_terminated()) {
    NoSafepointVerifier nsv;
    _handshake_cl->do_thread(thread);
  }

  if (start_time_ns != 0) {
    jlong completion_time = os::javaTimeNanos() - start_time_ns;
    log_debug(handshake, task)("Operation: %s for thread " PTR_FORMAT ", is_vm_thread: %s, completed in " JLONG_FORMAT " ns",
                               name(), p2i(thread), BOOL_TO_STR(Thread::current()->is_VM_thread()), completion_time);
  }

  // Inform VMThread/Handshaker that we have completed the operation.
  // When this is executed by the Handshakee we need a release store
  // here to make sure memory operations executed in the handshake
  // closure are visible to the VMThread/Handshaker after it reads
  // that the operation has completed.
  Atomic::dec(&_pending_threads);
  // Trailing fence, used to make sure removal of the operation strictly
  // happened after we completed the operation.

  // It is no longer safe to refer to 'this' as the VMThread/Handshaker may have destroyed this operation
}

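// Usage sketch (illustrative only; the closure name and body below are
// hypothetical): callers subclass HandshakeClosure and hand it to one of the
// Handshake::execute() overloads that follow.
//
//   class PrintThreadClosure : public HandshakeClosure {
//    public:
//     PrintThreadClosure() : HandshakeClosure("PrintThread") {}
//     void do_thread(Thread* thread) {
//       // Executed either by the target JavaThread itself or on its behalf
//       // by the VMThread/requester while the target is in a safe state.
//       log_info(handshake)("visited thread " INTPTR_FORMAT, p2i(thread));
//     }
//   };
//
//   PrintThreadClosure cl;
//   Handshake::execute(&cl);          // handshake all JavaThreads
//   Handshake::execute(&cl, target);  // handshake a single JavaThread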
void Handshake::execute(HandshakeClosure* hs_cl) {
  HandshakeOperation cto(hs_cl, NULL, Thread::current());
  VM_HandshakeAllThreads handshake(&cto);
  VMThread::execute(&handshake);
}

void Handshake::execute(HandshakeClosure* hs_cl, JavaThread* target) {
  JavaThread* self = JavaThread::current();
  HandshakeOperation op(hs_cl, target, Thread::current());

  jlong start_time_ns = os::javaTimeNanos();

  ThreadsListHandle tlh;
  if (tlh.includes(target)) {
    target->handshake_state()->add_operation(&op);
  } else {
    char buf[128];
    jio_snprintf(buf, sizeof(buf),  "(thread= " INTPTR_FORMAT " dead)", p2i(target));
    log_handshake_info(start_time_ns, op.name(), 0, 0, buf);
    return;
  }

  // Keeps count of how many of its own emitted handshakes
  // this thread executes.
  int emitted_handshakes_executed = 0;
  HandshakeSpinYield hsy(start_time_ns);
  while (!op.is_completed()) {
    HandshakeState::ProcessResult pr = target->handshake_state()->try_process(&op);
    if (pr == HandshakeState::_succeeded) {
      emitted_handshakes_executed++;
    }
    if (op.is_completed()) {
      break;
    }

    // Check if handshake operation has timed out
    check_handshake_timeout(start_time_ns, &op, target);

    hsy.add_result(pr);
    // Check for pending handshakes to avoid possible deadlocks where our
    // target is trying to handshake us.
    if (SafepointMechanism::should_process(self)) {
      // Will not suspend here.
      ThreadBlockInVM tbivm(self);
    }
    hsy.process();
  }

  // This pairs up with the release store in do_handshake(). It prevents future
  // loads from floating above the load of _pending_threads in is_completed()
  // and thus prevents reading stale data modified in the handshake closure
  // by the Handshakee.
  OrderAccess::acquire();

  log_handshake_info(start_time_ns, op.name(), 1, emitted_handshakes_executed);
}

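// Asynchronous flavor: returns without waiting for the closure to run. The
// AsyncHandshakeClosure is expected to be heap-allocated; ownership passes to
// the handshake machinery, which deletes the operation (and, through
// ~AsyncHandshakeOperation, the closure) once the target has executed it in
// process_by_self(), or immediately below if the target thread is already dead.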
void Handshake::execute(AsyncHandshakeClosure* hs_cl, JavaThread* target) {
  jlong start_time_ns = os::javaTimeNanos();
  AsyncHandshakeOperation* op = new AsyncHandshakeOperation(hs_cl, target, start_time_ns);

  ThreadsListHandle tlh;
  if (tlh.includes(target)) {
    target->handshake_state()->add_operation(op);
  } else {
    log_handshake_info(start_time_ns, op->name(), 0, 0, "(thread dead)");
    delete op;
  }
}

HandshakeState::HandshakeState(JavaThread* target) :
  _handshakee(target),
  _queue(),
  _lock(Monitor::nosafepoint, "HandshakeState_lock"),
  _active_handshaker(),
  _suspended(false),
  _async_suspend_handshake(false)
{
}

void HandshakeState::add_operation(HandshakeOperation* op) {
  // Adds are done lock free and so is arming.
  _queue.push(op);
  SafepointMechanism::arm_local_poll_release(_handshakee);
}

bool HandshakeState::operation_pending(HandshakeOperation* op) {
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  MatchOp mo(op);
  return _queue.contains(mo);
}

static bool no_suspend_filter(HandshakeOperation* op) {
  return !op->is_suspend();
}

HandshakeOperation* HandshakeState::get_op_for_self(bool allow_suspend) {
  assert(_handshakee == Thread::current(), "Must be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  if (allow_suspend) {
    return _queue.peek();
  } else {
    return _queue.peek(no_suspend_filter);
  }
}

static bool non_self_queue_filter(HandshakeOperation* op) {
  return !op->is_async();
}

bool HandshakeState::have_non_self_executable_operation() {
  assert(_handshakee != Thread::current(), "Must not be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  return _queue.contains(non_self_queue_filter);
}

bool HandshakeState::has_a_non_suspend_operation() {
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  return _queue.contains(no_suspend_filter);
}

HandshakeOperation* HandshakeState::get_op() {
  assert(_handshakee != Thread::current(), "Must not be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  return _queue.peek(non_self_queue_filter);
};

void HandshakeState::remove_op(HandshakeOperation* op) {
  assert(_lock.owned_by_self(), "Lock must be held");
  MatchOp mo(op);
  HandshakeOperation* ret = _queue.pop(mo);
  assert(ret == op, "Popped op must match requested op");
};

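// Called by the handshakee itself when its poll is armed. Processes the queued
// operations it is allowed to execute (optionally skipping suspend requests).
// Returns true only after executing an asynchronous operation, in which case
// the caller must still check for a pending safepoint.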
bool HandshakeState::process_by_self(bool allow_suspend) {
  assert(Thread::current() == _handshakee, "should call from _handshakee");
  assert(!_handshakee->is_terminated(), "should not be a terminated thread");
  assert(_handshakee->thread_state() != _thread_blocked, "should not be in a blocked state");
  assert(_handshakee->thread_state() != _thread_in_native, "should not be in native");

  ThreadInVMForHandshake tivm(_handshakee);
  // Handshakes cannot safely safepoint.
  // The exception to this rule is the asynchronous suspension handshake.
  // It by-passes the NSV by manually doing the transition.
  NoSafepointVerifier nsv;

  while (has_operation()) {
    MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);

    HandshakeOperation* op = get_op_for_self(allow_suspend);
    if (op != NULL) {
      assert(op->_target == NULL || op->_target == Thread::current(), "Wrong thread");
      bool async = op->is_async();
      log_trace(handshake)("Proc handshake %s " INTPTR_FORMAT " on " INTPTR_FORMAT " by self",
                           async ? "asynchronous" : "synchronous", p2i(op), p2i(_handshakee));
      op->prepare(_handshakee, _handshakee);
      if (!async) {
        HandleMark hm(_handshakee);
        PreserveExceptionMark pem(_handshakee);
        op->do_handshake(_handshakee); // acquire, op removed after
        remove_op(op);
      } else {
        // An asynchronous handshake may put the JavaThread in blocked state (safepoint safe).
        // The destructor ~PreserveExceptionMark touches the exception oop so it must not be executed,
        // since a safepoint may be in-progress when returning from the async handshake.
        op->do_handshake(_handshakee); // acquire, op removed after
        remove_op(op);
        log_handshake_info(((AsyncHandshakeOperation*)op)->start_time(), op->name(), 1, 0, "asynchronous");
        delete op;
        return true; // Must check for safepoints
      }
    } else {
      return false;
    }
  }
  return false;
}

bool HandshakeState::can_process_handshake() {
  // handshake_safe may only be called with polls armed.
  // Handshaker controls this by first claiming the handshake via claim_handshake().
  return SafepointSynchronize::handshake_safe(_handshakee);
}

bool HandshakeState::possibly_can_process_handshake() {
  // Note that this method is allowed to produce false positives.
  if (_handshakee->is_terminated()) {
    return true;
  }
  switch (_handshakee->thread_state()) {
  case _thread_in_native:
    // Native threads are safe if they have no Java stack or have a walkable stack.
    return !_handshakee->has_last_Java_frame() || _handshakee->frame_anchor()->walkable();

  case _thread_blocked:
    return true;

  default:
    return false;
  }
}

bool HandshakeState::claim_handshake() {
  if (!_lock.try_lock()) {
    return false;
  }
  // Operations are added lock free and then the poll is armed.
  // If all handshake operations for the handshakee are finished and someone
  // just adds an operation we may see it here. But if the handshakee is not
  // armed yet it is not safe to proceed.
  if (have_non_self_executable_operation()) {
    OrderAccess::loadload(); // Matches the implicit storestore in add_operation()
    if (SafepointMechanism::local_poll_armed(_handshakee)) {
      return true;
    }
  }
  _lock.unlock();
  return false;
}

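// Attempts to process a pending operation on behalf of the handshakee.
// Possible outcomes (see HandshakeState::ProcessResult):
//   _no_operation - the target has already emptied its queue
//   _not_safe     - the target was observed in an unsafe state
//   _claim_failed - the per-thread handshake lock could not be acquired
//   _succeeded    - match_op itself was executed by this caller
//   _processed    - a pending operation other than match_op was executed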
HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* match_op) {
  if (!has_operation()) {
    // JT has already cleared its handshake
    return HandshakeState::_no_operation;
  }

  if (!possibly_can_process_handshake()) {
    // JT is observed in an unsafe state; it must notice the handshake itself
    return HandshakeState::_not_safe;
  }

  // Claim the mutex if there is still an operation to be executed.
  if (!claim_handshake()) {
    return HandshakeState::_claim_failed;
  }

  // If we own the mutex at this point and can observe a safe state while
  // owning it, the thread cannot possibly continue without getting caught
  // by the mutex.
  if (!can_process_handshake()) {
    _lock.unlock();
    return HandshakeState::_not_safe;
  }

  Thread* current_thread = Thread::current();

  HandshakeOperation* op = get_op();

  assert(op != NULL, "Must have an op");
  assert(SafepointMechanism::local_poll_armed(_handshakee), "Must be");
  assert(op->_target == NULL || _handshakee == op->_target, "Wrong thread");

  log_trace(handshake)("Processing handshake " INTPTR_FORMAT " by %s(%s)", p2i(op),
                       op == match_op ? "handshaker" : "cooperative",
                       current_thread->is_VM_thread() ? "VM Thread" : "JavaThread");

  op->prepare(_handshakee, current_thread);

  set_active_handshaker(current_thread);
  op->do_handshake(_handshakee); // acquire, op removed after
  set_active_handshaker(NULL);
  remove_op(op);

  _lock.unlock();

  log_trace(handshake)("%s(" INTPTR_FORMAT ") executed an op for JavaThread: " INTPTR_FORMAT " %s target op: " INTPTR_FORMAT,
                       current_thread->is_VM_thread() ? "VM Thread" : "JavaThread",
                       p2i(current_thread), p2i(_handshakee),
                       op == match_op ? "including" : "excluding", p2i(match_op));

  return op == match_op ? HandshakeState::_succeeded : HandshakeState::_processed;
}

void HandshakeState::do_self_suspend() {
  assert(Thread::current() == _handshakee, "should call from _handshakee");
  assert(_lock.owned_by_self(), "Lock must be held");
  assert(!_handshakee->has_last_Java_frame() || _handshakee->frame_anchor()->walkable(), "should have walkable stack");
  assert(_handshakee->thread_state() == _thread_blocked, "Caller should have transitioned to _thread_blocked");

  while (is_suspended()) {
    log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended", p2i(_handshakee));
    _lock.wait_without_safepoint_check();
  }
  log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " resumed", p2i(_handshakee));
}

// This is the closure that prevents a suspended JavaThread from
// escaping the suspend request.
class ThreadSelfSuspensionHandshake : public AsyncHandshakeClosure {
 public:
  ThreadSelfSuspensionHandshake() : AsyncHandshakeClosure("ThreadSelfSuspensionHandshake") {}
  void do_thread(Thread* thr) {
    JavaThread* current = JavaThread::cast(thr);
    assert(current == Thread::current(), "Must be self executed.");
    JavaThreadState jts = current->thread_state();

    current->set_thread_state(_thread_blocked);
    current->handshake_state()->do_self_suspend();
    current->set_thread_state(jts);
    current->handshake_state()->set_async_suspend_handshake(false);
  }
  virtual bool is_suspend() { return true; }
};

bool HandshakeState::suspend_with_handshake() {
  assert(_handshakee->threadObj() != NULL, "cannot suspend with a NULL threadObj");
  if (_handshakee->is_exiting()) {
    log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " exiting", p2i(_handshakee));
    return false;
  }
  if (has_async_suspend_handshake()) {
    if (is_suspended()) {
      // Target is already suspended.
      log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " already suspended", p2i(_handshakee));
      return false;
    } else {
      // Target is going to wake up and leave suspension.
      // Let's just stop the thread from doing that.
      log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " re-suspended", p2i(_handshakee));
      set_suspended(true);
      return true;
    }
  }
  // no suspend request
  assert(!is_suspended(), "cannot be suspended without a suspend request");
  // The thread is safe, so it must execute the request; thus we can count it as
  // suspended from this point.
  set_suspended(true);
  set_async_suspend_handshake(true);
  log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended, arming ThreadSuspension", p2i(_handshakee));
  ThreadSelfSuspensionHandshake* ts = new ThreadSelfSuspensionHandshake();
  Handshake::execute(ts, _handshakee);
  return true;
}

// This is the closure that synchronously honors the suspend request.
class SuspendThreadHandshake : public HandshakeClosure {
  bool _did_suspend;
public:
  SuspendThreadHandshake() : HandshakeClosure("SuspendThread"), _did_suspend(false) {}
  void do_thread(Thread* thr) {
    JavaThread* target = JavaThread::cast(thr);
    _did_suspend = target->handshake_state()->suspend_with_handshake();
  }
  bool did_suspend() { return _did_suspend; }
};

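// Suspends the handshakee. If the target is the current thread it blocks and
// suspends itself directly; otherwise a synchronous SuspendThreadHandshake is
// executed, which in turn arms a ThreadSelfSuspensionHandshake on the target.
// Returns true if this request left the target suspended.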
bool HandshakeState::suspend() {
  JavaThread* self = JavaThread::current();
  if (_handshakee == self) {
    // If target is the current thread we can bypass the handshake machinery
    // and just suspend directly
    ThreadBlockInVM tbivm(self);
    MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
    set_suspended(true);
    do_self_suspend();
    return true;
  } else {
    SuspendThreadHandshake st;
    Handshake::execute(&st, _handshakee);
    return st.did_suspend();
  }
}

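// Resumes a suspended handshakee by clearing the suspended flag and notifying
// the thread waiting in do_self_suspend(). Returns false if the target was not
// suspended.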
bool HandshakeState::resume() {
  if (!is_suspended()) {
    return false;
  }
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  if (!is_suspended()) {
    assert(!_handshakee->is_suspended(), "cannot be suspended without a suspend request");
    return false;
  }
  // Resume the thread.
  set_suspended(false);
  _lock.notify();
  return true;
}