/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/task.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/filterQueue.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"

class HandshakeOperation : public CHeapObj<mtThread> {
  friend class HandshakeState;
 protected:
  HandshakeClosure*   _handshake_cl;
  // Keeps track of emitted and completed handshake operations.
  // Once it reaches zero all handshake operations have been performed.
  int32_t             _pending_threads;
  JavaThread*         _target;
  Thread*             _requester;

  // Must use AsyncHandshakeOperation when using AsyncHandshakeClosure.
  HandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, Thread* requester) :
    _handshake_cl(cl),
    _pending_threads(1),
    _target(target),
    _requester(requester) {}

 public:
  HandshakeOperation(HandshakeClosure* cl, JavaThread* target, Thread* requester) :
    _handshake_cl(cl),
    _pending_threads(1),
    _target(target),
    _requester(requester) {}
  virtual ~HandshakeOperation() {}
  void prepare(JavaThread* current_target, Thread* executing_thread);
  void do_handshake(JavaThread* thread);
  bool is_completed() {
    int32_t val = Atomic::load(&_pending_threads);
    assert(val >= 0, "_pending_threads=%d cannot be negative", val);
    return val == 0;
  }
  void add_target_count(int count) { Atomic::add(&_pending_threads, count); }
  int32_t pending_threads()        { return Atomic::load(&_pending_threads); }
  const char* name()               { return _handshake_cl->name(); }
  bool is_async()                  { return _handshake_cl->is_async(); }
  bool is_suspend()                { return _handshake_cl->is_suspend(); }
};

class AsyncHandshakeOperation : public HandshakeOperation {
 private:
  jlong _start_time_ns;
 public:
  AsyncHandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, jlong start_ns)
    : HandshakeOperation(cl, target, NULL), _start_time_ns(start_ns) {}
  virtual ~AsyncHandshakeOperation() { delete _handshake_cl; }
  jlong start_time() const           { return _start_time_ns; }
};
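
// Note on lifetime: a synchronous HandshakeOperation is stack-allocated by the
// requester, which does not return until the operation has completed. An
// AsyncHandshakeOperation is heap-allocated; the requester returns immediately
// and the target thread deletes the operation (and, via the destructor above,
// its closure) once the operation has been executed.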

// Performing handshakes requires a custom yielding strategy because without it
// there is a clear performance regression vs plain spinning. We keep track of
// when we last saw progress by looking at why each targeted thread has not yet
// completed its handshake. After spinning for a while with no progress we will
// yield, but as long as there is progress, we keep spinning. Thus we avoid
// yielding when there is potential work to be done or the handshake is close
// to being finished.
class HandshakeSpinYield : public StackObj {
 private:
  jlong _start_time_ns;
  jlong _last_spin_start_ns;
  jlong _spin_time_ns;

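  // Two-row result history: one row counts the outcomes of the previous round
  // of try_process() calls, the other the current round. state_changed()
  // compares the rows to decide whether any targeted thread made progress.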
  int _result_count[2][HandshakeState::_number_states];
  int _prev_result_pos;

  int current_result_pos() { return (_prev_result_pos + 1) & 0x1; }

  void wait_raw(jlong now) {
    // We start with fine-grained nanosleeping until a millisecond has
    // passed, at which point we resort to plain naked_short_sleep.
    if (now - _start_time_ns < NANOSECS_PER_MILLISEC) {
      os::naked_short_nanosleep(10 * (NANOUNITS / MICROUNITS));
    } else {
      os::naked_short_sleep(1);
    }
  }

  void wait_blocked(JavaThread* self, jlong now) {
    ThreadBlockInVM tbivm(self);
    wait_raw(now);
  }

  bool state_changed() {
    for (int i = 0; i < HandshakeState::_number_states; i++) {
      if (_result_count[0][i] != _result_count[1][i]) {
        return true;
      }
    }
    return false;
  }

  void reset_state() {
    _prev_result_pos++;
    for (int i = 0; i < HandshakeState::_number_states; i++) {
      _result_count[current_result_pos()][i] = 0;
    }
  }

 public:
  HandshakeSpinYield(jlong start_time) :
    _start_time_ns(start_time), _last_spin_start_ns(start_time),
    _spin_time_ns(0), _result_count(), _prev_result_pos(0) {

    const jlong max_spin_time_ns = 100 /* us */ * (NANOUNITS / MICROUNITS);
    int free_cpus = os::active_processor_count() - 1;
    _spin_time_ns = (5 /* us */ * (NANOUNITS / MICROUNITS)) * free_cpus; // zero on UP
    _spin_time_ns = _spin_time_ns > max_spin_time_ns ? max_spin_time_ns : _spin_time_ns;
  }
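
  // Illustrative arithmetic (not from this file): with 9 active processors the
  // spin budget is min((9 - 1) * 5 us, 100 us) = 40 us; on a uniprocessor the
  // budget is zero, so process() proceeds to waiting right away.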

  void add_result(HandshakeState::ProcessResult pr) {
    _result_count[current_result_pos()][pr]++;
  }

  void process() {
    jlong now = os::javaTimeNanos();
    if (state_changed()) {
      reset_state();
      // Restart the spin period: we spin for the configured time since the last state change.
      _last_spin_start_ns = now;
      return;
    }
    jlong wait_target = _last_spin_start_ns + _spin_time_ns;
    if (wait_target < now) {
      // On UP this is always true.
      Thread* self = Thread::current();
      if (self->is_Java_thread()) {
        wait_blocked(JavaThread::cast(self), now);
      } else {
        wait_raw(now);
      }
      _last_spin_start_ns = os::javaTimeNanos();
    }
    reset_state();
  }
};

static void handle_timeout(HandshakeOperation* op, JavaThread* target) {
  JavaThreadIteratorWithHandle jtiwh;

  log_error(handshake)("Handshake timeout: %s(" INTPTR_FORMAT "), pending threads: " INT32_FORMAT,
                       op->name(), p2i(op), op->pending_threads());

  if (target == NULL) {
    for ( ; JavaThread* thr = jtiwh.next(); ) {
      if (thr->handshake_state()->operation_pending(op)) {
        log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(thr), p2i(op));
        // Remember the last one found for more diagnostics below.
        target = thr;
      }
    }
  } else {
    log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(target), p2i(op));
  }

  if (target != NULL) {
    if (os::signal_thread(target, SIGILL, "cannot be handshaked")) {
      // Give target a chance to report the error and terminate the VM.
      os::naked_sleep(3000);
    }
  } else {
    log_error(handshake)("No thread with an unfinished handshake op(" INTPTR_FORMAT ") found.", p2i(op));
  }
  fatal("Handshake timeout");
}

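// HandshakeTimeout is given in milliseconds; when it is zero the timeout
// check below is disabled.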
static void check_handshake_timeout(jlong start_time, HandshakeOperation* op, JavaThread* target = NULL) {
  // Check if handshake operation has timed out
  jlong timeout_ns = millis_to_nanos(HandshakeTimeout);
  if (timeout_ns > 0) {
    if (os::javaTimeNanos() >= (start_time + timeout_ns)) {
      handle_timeout(op, target);
    }
  }
}

static void log_handshake_info(jlong start_time_ns, const char* name, int targets, int emitted_handshakes_executed, const char* extra = NULL) {
  if (log_is_enabled(Info, handshake)) {
    jlong completion_time = os::javaTimeNanos() - start_time_ns;
    log_info(handshake)("Handshake \"%s\", Targeted threads: %d, Executed by requesting thread: %d, Total completion time: " JLONG_FORMAT " ns%s%s",
                        name, targets,
                        emitted_handshakes_executed,
                        completion_time,
                        extra != NULL ? ", " : "",
                        extra != NULL ? extra : "");
  }
}

class VM_HandshakeAllThreads: public VM_Operation {
  HandshakeOperation* const _op;
 public:
  VM_HandshakeAllThreads(HandshakeOperation* op) : _op(op) {}

  bool evaluate_at_safepoint() const { return false; }

  void doit() {
    jlong start_time_ns = os::javaTimeNanos();

    JavaThreadIteratorWithHandle jtiwh;
    int number_of_threads_issued = 0;
    for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
      thr->handshake_state()->add_operation(_op);
      number_of_threads_issued++;
    }

    if (number_of_threads_issued < 1) {
      log_handshake_info(start_time_ns, _op->name(), 0, 0, "no threads alive");
      return;
    }
    // _op was created with a count == 1 so don't double count.
    _op->add_target_count(number_of_threads_issued - 1);

    log_trace(handshake)("Threads signaled, begin processing blocked threads by VMThread");
    HandshakeSpinYield hsy(start_time_ns);
    // Keeps count of how many of its own emitted handshakes
    // this thread executes.
    int emitted_handshakes_executed = 0;
    do {
      // Check if handshake operation has timed out
      check_handshake_timeout(start_time_ns, _op);

      // Have the VM thread perform the handshake operation for blocked threads.
      // Observing a blocked state may of course be transient, but the processing is guarded
      // by mutexes and we optimistically begin by working on the blocked threads.
      jtiwh.rewind();
      for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
        // A new thread on the ThreadsList will not have an operation,
        // hence it is skipped by try_process.
        HandshakeState::ProcessResult pr = thr->handshake_state()->try_process(_op);
        hsy.add_result(pr);
        if (pr == HandshakeState::_succeeded) {
          emitted_handshakes_executed++;
        }
      }
      hsy.process();
    } while (!_op->is_completed());

    // This pairs up with the release store in do_handshake(). It prevents future
    // loads from floating above the load of _pending_threads in is_completed()
    // and thus prevents reading stale data modified in the handshake closure
    // by the Handshakee.
    OrderAccess::acquire();

    log_handshake_info(start_time_ns, _op->name(), number_of_threads_issued, emitted_handshakes_executed);
  }

  VMOp_Type type() const { return VMOp_HandshakeAllThreads; }
};

void HandshakeOperation::prepare(JavaThread* current_target, Thread* executing_thread) {
  if (current_target->is_terminated()) {
    // Will never execute any handshakes on this thread.
    return;
  }
  if (current_target != executing_thread) {
    // Only when the target is not executing the handshake itself.
    StackWatermarkSet::start_processing(current_target, StackWatermarkKind::gc);
  }
  if (_requester != NULL && _requester != executing_thread && _requester->is_Java_thread()) {
    // The handshake closure may contain oop Handles from the _requester.
    // We must make sure we can use them.
    StackWatermarkSet::start_processing(JavaThread::cast(_requester), StackWatermarkKind::gc);
  }
}

void HandshakeOperation::do_handshake(JavaThread* thread) {
  jlong start_time_ns = 0;
  if (log_is_enabled(Debug, handshake, task)) {
    start_time_ns = os::javaTimeNanos();
  }

  // Only actually execute the operation for non-terminated threads.
  if (!thread->is_terminated()) {
    //NoSafepointVerifier nsv;
    _handshake_cl->do_thread(thread);
  }

  if (start_time_ns != 0) {
    jlong completion_time = os::javaTimeNanos() - start_time_ns;
    log_debug(handshake, task)("Operation: %s for thread " PTR_FORMAT ", is_vm_thread: %s, completed in " JLONG_FORMAT " ns",
                               name(), p2i(thread), BOOL_TO_STR(Thread::current()->is_VM_thread()), completion_time);
  }

  // Inform VMThread/Handshaker that we have completed the operation.
  // When this is executed by the Handshakee we need a release store
  // here to make sure memory operations executed in the handshake
  // closure are visible to the VMThread/Handshaker after it reads
  // that the operation has completed.
  Atomic::dec(&_pending_threads);
  // Trailing fence, used to make sure removal of the operation strictly
  // happened after we completed the operation.

  // It is no longer safe to refer to 'this' as the VMThread/Handshaker may have destroyed this operation
}

void Handshake::execute(HandshakeClosure* hs_cl) {
  HandshakeOperation cto(hs_cl, NULL, Thread::current());
  VM_HandshakeAllThreads handshake(&cto);
  VMThread::execute(&handshake);
}
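
// Illustrative usage (a sketch, not code from this file): a caller defines a
// HandshakeClosure and passes it to one of the execute() overloads, e.g.
//
//   class PrintThreadClosure : public HandshakeClosure {      // hypothetical
//    public:
//     PrintThreadClosure() : HandshakeClosure("PrintThread") {}
//     void do_thread(Thread* thread) {
//       // Runs exactly once per targeted JavaThread, either by the target
//       // itself or on its behalf by the VMThread/requesting thread.
//     }
//   };
//   PrintThreadClosure cl;
//   Handshake::execute(&cl);          // handshake all JavaThreads (above)
//   Handshake::execute(&cl, target);  // handshake a single JavaThread (below)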

void Handshake::execute(HandshakeClosure* hs_cl, JavaThread* target) {
  JavaThread* self = JavaThread::current();
  HandshakeOperation op(hs_cl, target, Thread::current());

  jlong start_time_ns = os::javaTimeNanos();

  ThreadsListHandle tlh;
  if (tlh.includes(target)) {
    target->handshake_state()->add_operation(&op);
  } else {
    char buf[128];
    jio_snprintf(buf, sizeof(buf),  "(thread= " INTPTR_FORMAT " dead)", p2i(target));
    log_handshake_info(start_time_ns, op.name(), 0, 0, buf);
    return;
  }

  // Keeps count of how many of its own emitted handshakes
  // this thread executes.
  int emitted_handshakes_executed = 0;
  HandshakeSpinYield hsy(start_time_ns);
  while (!op.is_completed()) {
    HandshakeState::ProcessResult pr = target->handshake_state()->try_process(&op);
    if (pr == HandshakeState::_succeeded) {
      emitted_handshakes_executed++;
    }
    if (op.is_completed()) {
      break;
    }

    // Check if handshake operation has timed out
    check_handshake_timeout(start_time_ns, &op, target);

    hsy.add_result(pr);
    // Check for pending handshakes to avoid possible deadlocks where our
    // target is trying to handshake us.
    if (SafepointMechanism::should_process(self)) {
      // Will not suspend here.
      ThreadBlockInVM tbivm(self);
    }
    hsy.process();
  }

  // This pairs up with the release store in do_handshake(). It prevents future
  // loads from floating above the load of _pending_threads in is_completed()
  // and thus prevents reading stale data modified in the handshake closure
  // by the Handshakee.
  OrderAccess::acquire();

  log_handshake_info(start_time_ns, op.name(), 1, emitted_handshakes_executed);
}

void Handshake::execute(AsyncHandshakeClosure* hs_cl, JavaThread* target) {
  jlong start_time_ns = os::javaTimeNanos();
  AsyncHandshakeOperation* op = new AsyncHandshakeOperation(hs_cl, target, start_time_ns);

  ThreadsListHandle tlh;
  if (tlh.includes(target)) {
    target->handshake_state()->add_operation(op);
  } else {
    log_handshake_info(start_time_ns, op->name(), 0, 0, "(thread dead)");
    delete op;
  }
}
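
// Note: the AsyncHandshakeClosure passed above must be heap-allocated, since
// ownership transfers to the handshake machinery: the closure is deleted
// together with the AsyncHandshakeOperation once the target has executed it
// (or immediately above, if the target is already dead).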

HandshakeState::HandshakeState(JavaThread* target) :
  _handshakee(target),
  _queue(),
  _lock(Monitor::nosafepoint, "HandshakeState_lock"),
  _active_handshaker(),
  _caller(nullptr),
  _suspended(false),
  _async_suspend_handshake(false)
{
}

void HandshakeState::add_operation(HandshakeOperation* op) {
  // Adds are done lock free and so is arming.
  _queue.push(op);
  SafepointMechanism::arm_local_poll_release(_handshakee);
}

bool HandshakeState::operation_pending(HandshakeOperation* op) {
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  MatchOp mo(op);
  return _queue.contains(mo);
}

static bool no_suspend_filter(HandshakeOperation* op) {
  return !op->is_suspend();
}

HandshakeOperation* HandshakeState::get_op_for_self(bool allow_suspend) {
  assert(_handshakee == Thread::current(), "Must be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  if (allow_suspend) {
    return _queue.peek();
  } else {
    return _queue.peek(no_suspend_filter);
  }
}

bool HandshakeState::non_self_queue_filter(HandshakeOperation* op) {
  if (op->_handshake_cl->can_be_processed_by(Thread::current())) {
    return !op->is_async();
  }
  return false;
}

bool HandshakeState::have_non_self_executable_operation() {
  assert(_handshakee != Thread::current(), "Must not be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  return _queue.contains(non_self_queue_filter);
}

bool HandshakeState::has_a_non_suspend_operation() {
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  return _queue.contains(no_suspend_filter);
}

HandshakeOperation* HandshakeState::get_op() {
  assert(_handshakee != Thread::current(), "Must not be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  return _queue.peek(non_self_queue_filter);
};

void HandshakeState::remove_op(HandshakeOperation* op) {
  assert(_lock.owned_by_self(), "Lock must be held");
  MatchOp mo(op);
  HandshakeOperation* ret = _queue.pop(mo);
  assert(ret == op, "Popped op must match requested op");
};

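// Executed by the handshakee itself to process its own pending operations
// (suspend operations are skipped unless allow_suspend is true). Returns true
// iff an asynchronous operation was executed, in which case the caller must
// check for a pending safepoint before continuing.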
bool HandshakeState::process_by_self(bool allow_suspend) {
  assert(Thread::current() == _handshakee, "should call from _handshakee");
  assert(!_handshakee->is_terminated(), "should not be a terminated thread");
  assert(_handshakee->thread_state() != _thread_blocked, "should not be in a blocked state");
  assert(_handshakee->thread_state() != _thread_in_native, "should not be in native");

  ThreadInVMForHandshake tivm(_handshakee);
  // Handshakes cannot safely safepoint.
  // The exception to this rule is the asynchronous suspension handshake.
  // It by-passes the NSV by manually doing the transition.
  //NoSafepointVerifier nsv;

  while (has_operation()) {
    MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);

    HandshakeOperation* op = get_op_for_self(allow_suspend);
    if (op != NULL) {
      assert(op->_target == NULL || op->_target == Thread::current(), "Wrong thread");
      bool async = op->is_async();
      log_trace(handshake)("Proc handshake %s " INTPTR_FORMAT " on " INTPTR_FORMAT " by self",
                           async ? "asynchronous" : "synchronous", p2i(op), p2i(_handshakee));
      op->prepare(_handshakee, _handshakee);
      if (!async) {
        HandleMark hm(_handshakee);
        PreserveExceptionMark pem(_handshakee);
        op->do_handshake(_handshakee); // acquire, op removed after
        remove_op(op);
      } else {
        // An asynchronous handshake may put the JavaThread in blocked state (safepoint safe).
        // The destructor ~PreserveExceptionMark touches the exception oop so it must not be executed,
        // since a safepoint may be in-progress when returning from the async handshake.
        op->do_handshake(_handshakee); // acquire, op removed after
        remove_op(op);
        log_handshake_info(((AsyncHandshakeOperation*)op)->start_time(), op->name(), 1, 0, "asynchronous");
        delete op;
        return true; // Must check for safepoints
      }
    } else {
      return false;
    }
  }
  return false;
}

bool HandshakeState::can_process_handshake() {
  // handshake_safe may only be called with polls armed.
  // Handshaker controls this by first claiming the handshake via claim_handshake().
  return SafepointSynchronize::handshake_safe(_handshakee);
}

bool HandshakeState::possibly_can_process_handshake() {
  // Note that this method is allowed to produce false positives.
  if (_handshakee->is_terminated()) {
    return true;
  }
  switch (_handshakee->thread_state()) {
  case _thread_in_native:
    // native threads are safe if they have no java stack or have walkable stack
    return !_handshakee->has_last_Java_frame() || _handshakee->frame_anchor()->walkable();

  case _thread_blocked:
    return true;

  default:
    return false;
  }
}

bool HandshakeState::claim_handshake() {
  if (!_lock.try_lock()) {
    return false;
  }
  // Operations are added lock free and then the poll is armed.
  // If all handshake operations for the handshakee are finished and someone
  // just adds an operation we may see it here. But if the handshakee is not
  // armed yet it is not safe to proceed.
  if (have_non_self_executable_operation()) {
    OrderAccess::loadload(); // Matches the implicit storestore in add_operation()
    if (SafepointMechanism::local_poll_armed(_handshakee)) {
      return true;
    }
  }
  _lock.unlock();
  return false;
}

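// Attempt to execute one pending operation on behalf of the handshakee.
// Returns _succeeded if the operation executed was match_op itself,
// _processed if some other pending operation was executed, and otherwise a
// result describing why nothing could be processed this time around.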
HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* match_op) {
  if (!has_operation()) {
    // JT has already cleared its handshake
    return HandshakeState::_no_operation;
  }

  if (!possibly_can_process_handshake()) {
    // JT is observed in an unsafe state; it must notice the handshake itself
    return HandshakeState::_not_safe;
  }

  // Claim the mutex if there is still an operation to be executed.
  if (!claim_handshake()) {
    return HandshakeState::_claim_failed;
  }

  // If we own the mutex at this point and can observe a safe state while
  // holding it, the thread cannot possibly continue without getting caught
  // by the mutex.
  if (!can_process_handshake()) {
    _lock.unlock();
    return HandshakeState::_not_safe;
  }

  Thread* current_thread = Thread::current();

  HandshakeOperation* op = get_op();

  assert(op != NULL, "Must have an op");
  assert(SafepointMechanism::local_poll_armed(_handshakee), "Must be");
  assert(op->_target == NULL || _handshakee == op->_target, "Wrong thread");

  log_trace(handshake)("Processing handshake " INTPTR_FORMAT " by %s(%s)", p2i(op),
                       op == match_op ? "handshaker" : "cooperative",
                       current_thread->is_VM_thread() ? "VM Thread" : "JavaThread");

  op->prepare(_handshakee, current_thread);

  set_active_handshaker(current_thread);
  op->do_handshake(_handshakee); // acquire, op removed after
  set_active_handshaker(NULL);
  remove_op(op);

  _lock.unlock();

  log_trace(handshake)("%s(" INTPTR_FORMAT ") executed an op for JavaThread: " INTPTR_FORMAT " %s target op: " INTPTR_FORMAT,
                       current_thread->is_VM_thread() ? "VM Thread" : "JavaThread",
                       p2i(current_thread), p2i(_handshakee),
                       op == match_op ? "including" : "excluding", p2i(match_op));

  return op == match_op ? HandshakeState::_succeeded : HandshakeState::_processed;
}

void HandshakeState::do_self_suspend() {
  assert(Thread::current() == _handshakee, "should call from _handshakee");
  assert(_lock.owned_by_self(), "Lock must be held");
  assert(!_handshakee->has_last_Java_frame() || _handshakee->frame_anchor()->walkable(), "should have walkable stack");
  assert(_handshakee->thread_state() == _thread_blocked, "Caller should have transitioned to _thread_blocked");

  while (is_suspended_or_blocked()) {
    JVMTI_ONLY(assert(!_handshakee->is_in_VTMT(), "no suspend allowed in VTMT transition");)
    log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended", p2i(_handshakee));
    _lock.wait_without_safepoint_check();
  }
  log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " resumed", p2i(_handshakee));
}

// This is the closure that prevents a suspended JavaThread from
// escaping the suspend request.
class ThreadSelfSuspensionHandshake : public AsyncHandshakeClosure {
 public:
  ThreadSelfSuspensionHandshake() : AsyncHandshakeClosure("ThreadSelfSuspensionHandshake") {}
  void do_thread(Thread* thr) {
    JavaThread* current = JavaThread::cast(thr);
    assert(current == Thread::current(), "Must be self executed.");
    JavaThreadState jts = current->thread_state();

    current->set_thread_state(_thread_blocked);
    current->handshake_state()->do_self_suspend();
    current->set_thread_state(jts);
    current->handshake_state()->set_async_suspend_handshake(false);
  }
  virtual bool is_suspend() { return true; }
};

bool HandshakeState::suspend_with_handshake(JavaThread* caller) {
  // This also tested for _handshakee->threadObj() != NULL, but that test does
  // not work for this case. TODO: can you suspend a thread during initialization?
  if (_handshakee->is_exiting()) {
    log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " exiting", p2i(_handshakee));
    return false;
  }
  bool should_block = caller != nullptr;
  bool should_suspend = caller == nullptr;

  if (has_async_suspend_handshake()) {
    if ((is_suspended() && should_suspend) || (is_blocked() && should_block)) {
      log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " already suspended or blocked", p2i(_handshakee));
      return false;
    } else if (should_suspend) {
      // Target is going to wake up and leave suspension.
      // Let's just stop the thread from doing that.
      log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " re-suspended", p2i(_handshakee));
      set_suspended(true);
      return true;
    } else {
      assert(should_block, "should block");
      // Target is going to wake up and leave blocking.
      // Let's just stop the thread from doing that.
      log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " re-blocked", p2i(_handshakee));
      set_caller_thread(caller);
      return true;
    }
  }

  // Thread is safe, so it must execute the request, thus we can count it as suspended
  // or blocked from this point.
  if (should_suspend) {
    // Not suspended yet; install the suspend request now.
    assert(!is_suspended(), "cannot be suspended without a request");
    set_suspended(true);
  } else {
    assert(!is_blocked(), "cannot be blocked without a request");
    set_caller_thread(caller);
  }

  set_async_suspend_handshake(true);
  log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended, arming ThreadSuspension", p2i(_handshakee));
  ThreadSelfSuspensionHandshake* ts = new ThreadSelfSuspensionHandshake();
  Handshake::execute(ts, _handshakee);
  return true;
}

// This is the closure that synchronously honors the suspend request.
class SuspendThreadHandshake : public HandshakeClosure {
  JavaThread* _caller;
  bool        _did_suspend;
public:
  SuspendThreadHandshake(JavaThread* caller) : HandshakeClosure("SuspendThread"), _caller(caller), _did_suspend(false) {}
  void do_thread(Thread* thr) {
    JavaThread* target = JavaThread::cast(thr);
    _did_suspend = target->handshake_state()->suspend_with_handshake(_caller);
  }
  bool did_suspend() { return _did_suspend; }
};

bool HandshakeState::suspend() {
  JVMTI_ONLY(assert(!_handshakee->is_in_VTMT(), "no suspend allowed in VTMT transition");)
  JavaThread* self = JavaThread::current();
  if (_handshakee == self) {
    // If target is the current thread we can bypass the handshake machinery
    // and just suspend directly
    ThreadBlockInVM tbivm(self);
    MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
    set_suspended(true);
    do_self_suspend();
    return true;
  } else {
    SuspendThreadHandshake st(nullptr);
    Handshake::execute(&st, _handshakee);
    return st.did_suspend();
  }
}

bool HandshakeState::resume() {
  if (!is_suspended()) {
    return false;
  }
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  if (!is_suspended()) {
    assert(!_handshakee->is_suspended(), "cannot be suspended without a suspend request");
    return false;
  }
  // Resume the thread.
  set_suspended(false);
  _lock.notify();
  return true;
}

// One thread blocks execution of another thread until it resumes it. This is
// similar to suspend, and much of the code is shared, but it is a separate
// state from being suspended. The commonality is that the target thread is
// self-suspended and waits for both conditions to clear.
bool HandshakeState::block_suspend(JavaThread* caller) {
  assert(caller == JavaThread::current(), "caller must be current thread");

  SuspendThreadHandshake st(caller);
  Handshake::execute(&st, _handshakee);
  bool suspended = st.did_suspend();
  return suspended;
}

bool HandshakeState::continue_resume(JavaThread* caller) {
  assert(caller == JavaThread::current(), "caller must be current thread");

  // Only resume the thread if it is blocked and the caller is the thread that blocked it.
  if (!is_blocked() || caller_thread() != caller) {
    return false;
  }
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  assert(is_blocked() && caller_thread() == caller,
         "this is the only thread that can continue this thread");

  // Resume the thread.
  set_caller_thread(nullptr); // !is_blocked()
  _lock.notify();
  return true;
}