/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/task.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/filterQueue.inline.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"

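// A HandshakeOperation wraps a HandshakeClosure and keeps track of how many
// targeted threads have not yet executed it (_pending_threads). The operation
// is queued on each target's HandshakeState and may be executed either by the
// target itself or, cooperatively, by the requester/VMThread.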
class HandshakeOperation : public CHeapObj<mtThread> {
  friend class HandshakeState;
 protected:
  HandshakeClosure*   _handshake_cl;
  // Keeps track of emitted and completed handshake operations.
  // Once it reaches zero all handshake operations have been performed.
  int32_t             _pending_threads;
  JavaThread*         _target;
  Thread*             _requester;

  // Must use AsyncHandshakeOperation when using AsyncHandshakeClosure.
  HandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, Thread* requester) :
    _handshake_cl(cl),
    _pending_threads(1),
    _target(target),
    _requester(requester) {}

 public:
  HandshakeOperation(HandshakeClosure* cl, JavaThread* target, Thread* requester) :
    _handshake_cl(cl),
    _pending_threads(1),
    _target(target),
    _requester(requester) {}
  virtual ~HandshakeOperation() {}
  void prepare(JavaThread* current_target, Thread* executing_thread);
  void do_handshake(JavaThread* thread);
  bool is_completed() {
    int32_t val = Atomic::load(&_pending_threads);
    assert(val >= 0, "_pending_threads=%d cannot be negative", val);
    return val == 0;
  }
  void add_target_count(int count) { Atomic::add(&_pending_threads, count); }
  int32_t pending_threads()        { return Atomic::load(&_pending_threads); }
  const char* name()               { return _handshake_cl->name(); }
  bool is_async()                  { return _handshake_cl->is_async(); }
  bool is_suspend()                { return _handshake_cl->is_suspend(); }
};

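// An AsyncHandshakeOperation is heap allocated, owns its closure, and is
// deleted by the target after it has executed it; the requester does not
// wait for completion.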
class AsyncHandshakeOperation : public HandshakeOperation {
 private:
  jlong _start_time_ns;
 public:
  AsyncHandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, jlong start_ns)
    : HandshakeOperation(cl, target, NULL), _start_time_ns(start_ns) {}
  virtual ~AsyncHandshakeOperation() { delete _handshake_cl; }
  jlong start_time() const           { return _start_time_ns; }
};

// Performing handshakes requires a custom yielding strategy because without it
// there is a clear performance regression vs plain spinning. We keep track of
// when we last saw progress by looking at why each targeted thread has not yet
// completed its handshake. After spinning for a while with no progress we will
// yield, but as long as there is progress, we keep spinning. Thus we avoid
// yielding when there is potential work to be done or the handshake is close
// to being finished.
class HandshakeSpinYield : public StackObj {
 private:
  jlong _start_time_ns;
  jlong _last_spin_start_ns;
  jlong _spin_time_ns;

  int _result_count[2][HandshakeState::_number_states];
  int _prev_result_pos;

  int current_result_pos() { return (_prev_result_pos + 1) & 0x1; }

  void wait_raw(jlong now) {
    // We start with fine-grained nanosleeping until a millisecond has
    // passed, at which point we resort to plain naked_short_sleep.
    if (now - _start_time_ns < NANOSECS_PER_MILLISEC) {
      os::naked_short_nanosleep(10 * (NANOUNITS / MICROUNITS));
    } else {
      os::naked_short_sleep(1);
    }
  }

  void wait_blocked(JavaThread* self, jlong now) {
    ThreadBlockInVM tbivm(self);
    wait_raw(now);
  }

  bool state_changed() {
    for (int i = 0; i < HandshakeState::_number_states; i++) {
      if (_result_count[0][i] != _result_count[1][i]) {
        return true;
      }
    }
    return false;
  }

  void reset_state() {
    _prev_result_pos++;
    for (int i = 0; i < HandshakeState::_number_states; i++) {
      _result_count[current_result_pos()][i] = 0;
    }
  }

 public:
  HandshakeSpinYield(jlong start_time) :
    _start_time_ns(start_time), _last_spin_start_ns(start_time),
    _spin_time_ns(0), _result_count(), _prev_result_pos(0) {

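    // Spin for roughly 5 us per free CPU, capped at 100 us. For example, with
    // 8 active processors this gives 5 us * 7 = 35 us of spinning; with 21 or
    // more processors the 100 us cap applies; on a uniprocessor we do not
    // spin at all.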
    const jlong max_spin_time_ns = 100 /* us */ * (NANOUNITS / MICROUNITS);
    int free_cpus = os::active_processor_count() - 1;
    _spin_time_ns = (5 /* us */ * (NANOUNITS / MICROUNITS)) * free_cpus; // zero on UP
    _spin_time_ns = _spin_time_ns > max_spin_time_ns ? max_spin_time_ns : _spin_time_ns;
  }

  void add_result(HandshakeState::ProcessResult pr) {
    _result_count[current_result_pos()][pr]++;
  }

  void process() {
    jlong now = os::javaTimeNanos();
    if (state_changed()) {
      reset_state();
      // We spin for x amount of time since last state change.
      _last_spin_start_ns = now;
      return;
    }
    jlong wait_target = _last_spin_start_ns + _spin_time_ns;
    if (wait_target < now) {
      // On UP this is always true.
      Thread* self = Thread::current();
      if (self->is_Java_thread()) {
        wait_blocked(JavaThread::cast(self), now);
      } else {
        wait_raw(now);
      }
      _last_spin_start_ns = os::javaTimeNanos();
    }
    reset_state();
  }
};

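// Called when a handshake operation has exceeded HandshakeTimeout. Logs which
// JavaThreads have not cleared the operation, signals one of them with SIGILL
// so it can report the error, and finally terminates the VM.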
static void handle_timeout(HandshakeOperation* op, JavaThread* target) {
  JavaThreadIteratorWithHandle jtiwh;

  log_error(handshake)("Handshake timeout: %s(" INTPTR_FORMAT "), pending threads: " INT32_FORMAT,
                       op->name(), p2i(op), op->pending_threads());

  if (target == NULL) {
    for ( ; JavaThread* thr = jtiwh.next(); ) {
      if (thr->handshake_state()->operation_pending(op)) {
        log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(thr), p2i(op));
        // Remember the last one found for more diagnostics below.
        target = thr;
      }
    }
  } else {
    log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(target), p2i(op));
  }

  if (target != NULL) {
    if (os::signal_thread(target, SIGILL, "cannot be handshaked")) {
      // Give target a chance to report the error and terminate the VM.
      os::naked_sleep(3000);
    }
  } else {
    log_error(handshake)("No thread with an unfinished handshake op(" INTPTR_FORMAT ") found.", p2i(op));
  }
  fatal("Handshake timeout");
}

static void check_handshake_timeout(jlong start_time, HandshakeOperation* op, JavaThread* target = NULL) {
  // Check if handshake operation has timed out
  jlong timeout_ns = millis_to_nanos(HandshakeTimeout);
  if (timeout_ns > 0) {
    if (os::javaTimeNanos() >= (start_time + timeout_ns)) {
      handle_timeout(op, target);
    }
  }
}

static void log_handshake_info(jlong start_time_ns, const char* name, int targets, int emitted_handshakes_executed, const char* extra = NULL) {
  if (log_is_enabled(Info, handshake)) {
    jlong completion_time = os::javaTimeNanos() - start_time_ns;
    log_info(handshake)("Handshake \"%s\", Targeted threads: %d, Executed by requesting thread: %d, Total completion time: " JLONG_FORMAT " ns%s%s",
                        name, targets,
                        emitted_handshakes_executed,
                        completion_time,
                        extra != NULL ? ", " : "",
                        extra != NULL ? extra : "");
  }
}

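// VM operation performing a synchronous handshake with all JavaThreads. It is
// not evaluated at a safepoint: the VMThread queues the operation on every
// thread and then cooperatively executes it for threads observed in a
// handshake-safe state, while the remaining threads execute it themselves
// when they poll.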
class VM_HandshakeAllThreads: public VM_Operation {
  HandshakeOperation* const _op;
 public:
  VM_HandshakeAllThreads(HandshakeOperation* op) : _op(op) {}

  bool evaluate_at_safepoint() const { return false; }

  void doit() {
    jlong start_time_ns = os::javaTimeNanos();

    JavaThreadIteratorWithHandle jtiwh;
    int number_of_threads_issued = 0;
    for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
      thr->handshake_state()->add_operation(_op);
      number_of_threads_issued++;
    }

    if (number_of_threads_issued < 1) {
      log_handshake_info(start_time_ns, _op->name(), 0, 0, "no threads alive");
      return;
    }
    // _op was created with a count == 1 so don't double count.
    _op->add_target_count(number_of_threads_issued - 1);

    log_trace(handshake)("Threads signaled, begin processing blocked threads by VMThread");
    HandshakeSpinYield hsy(start_time_ns);
    // Counts how many of the handshakes this thread emitted
    // that it also executed itself.
    int emitted_handshakes_executed = 0;
    do {
      // Check if the handshake operation has timed out.
      check_handshake_timeout(start_time_ns, _op);

      // Have the VM thread perform the handshake operation for blocked threads.
      // Observing a blocked state may of course be transient, but the processing is
      // guarded by mutexes and we optimistically begin by working on the blocked threads.
      jtiwh.rewind();
      for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
        // A new thread on the ThreadsList will not have an operation,
        // hence it is skipped in handshake_try_process.
        HandshakeState::ProcessResult pr = thr->handshake_state()->try_process(_op);
        hsy.add_result(pr);
        if (pr == HandshakeState::_succeeded) {
          emitted_handshakes_executed++;
        }
      }
      hsy.process();
    } while (!_op->is_completed());

    // This pairs up with the release store in do_handshake(). It prevents future
    // loads from floating above the load of _pending_threads in is_completed()
    // and thus prevents reading stale data modified in the handshake closure
    // by the Handshakee.
    OrderAccess::acquire();

    log_handshake_info(start_time_ns, _op->name(), number_of_threads_issued, emitted_handshakes_executed);
  }

  VMOp_Type type() const { return VMOp_HandshakeAllThreads; }
};

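// Make sure stack watermark processing has started for every stack the closure
// may touch: the target's own stack (unless the target executes the handshake
// itself) and the requester's stack, since the closure may reference Handles
// owned by the requester.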
void HandshakeOperation::prepare(JavaThread* current_target, Thread* executing_thread) {
  if (current_target->is_terminated()) {
    // Will never execute any handshakes on this thread.
    return;
  }
  if (current_target != executing_thread) {
    // Only when the target is not executing the handshake itself.
    StackWatermarkSet::start_processing(current_target, StackWatermarkKind::gc);
  }
  if (_requester != NULL && _requester != executing_thread && _requester->is_Java_thread()) {
    // The handshake closure may contain oop Handles from the _requester.
    // We must make sure we can use them.
    StackWatermarkSet::start_processing(JavaThread::cast(_requester), StackWatermarkKind::gc);
  }
}

void HandshakeOperation::do_handshake(JavaThread* thread) {
  jlong start_time_ns = 0;
  if (log_is_enabled(Debug, handshake, task)) {
    start_time_ns = os::javaTimeNanos();
  }

  // Only actually execute the operation for non-terminated threads.
  if (!thread->is_terminated()) {
    NoSafepointVerifier nsv;
    _handshake_cl->do_thread(thread);
  }

  if (start_time_ns != 0) {
    jlong completion_time = os::javaTimeNanos() - start_time_ns;
    log_debug(handshake, task)("Operation: %s for thread " PTR_FORMAT ", is_vm_thread: %s, completed in " JLONG_FORMAT " ns",
                               name(), p2i(thread), BOOL_TO_STR(Thread::current()->is_VM_thread()), completion_time);
  }

  // Inform VMThread/Handshaker that we have completed the operation.
  // When this is executed by the Handshakee we need a release store
  // here to make sure memory operations executed in the handshake
  // closure are visible to the VMThread/Handshaker after it reads
  // that the operation has completed.
  Atomic::dec(&_pending_threads);
  // Trailing fence, used to make sure removal of the operation strictly
  // happened after we completed the operation.

  // It is no longer safe to refer to 'this' as the VMThread/Handshaker may have destroyed this operation
}

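// Example usage (an illustrative sketch only; the closure name and body below
// are not part of this file): a synchronous handshake is requested by
// constructing a HandshakeClosure subclass and passing it to one of the
// Handshake::execute() overloads.
//
//   class ExampleClosure : public HandshakeClosure {
//    public:
//     ExampleClosure() : HandshakeClosure("Example") {}
//     void do_thread(Thread* thread) {
//       // Runs either on the target JavaThread itself or on the
//       // requester/VMThread while the target is handshake-safe.
//     }
//   };
//
//   ExampleClosure cl;
//   Handshake::execute(&cl);          // handshake all JavaThreads
//   Handshake::execute(&cl, target);  // handshake a single JavaThread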
void Handshake::execute(HandshakeClosure* hs_cl) {
  HandshakeOperation cto(hs_cl, NULL, Thread::current());
  VM_HandshakeAllThreads handshake(&cto);
  VMThread::execute(&handshake);
}

void Handshake::execute(HandshakeClosure* hs_cl, JavaThread* target) {
  // tlh == nullptr means we rely on a ThreadsListHandle somewhere
  // in the caller's context (and we sanity check for that).
  Handshake::execute(hs_cl, nullptr, target);
}

void Handshake::execute(HandshakeClosure* hs_cl, ThreadsListHandle* tlh, JavaThread* target) {
  JavaThread* self = JavaThread::current();
  HandshakeOperation op(hs_cl, target, Thread::current());

  jlong start_time_ns = os::javaTimeNanos();

  guarantee(target != nullptr, "must be");
  if (tlh == nullptr) {
    guarantee(Thread::is_JavaThread_protected_by_TLH(target),
              "missing ThreadsListHandle in calling context.");
    target->handshake_state()->add_operation(&op);
  } else if (tlh->includes(target)) {
    target->handshake_state()->add_operation(&op);
  } else {
    char buf[128];
    jio_snprintf(buf, sizeof(buf), "(thread= " INTPTR_FORMAT " dead)", p2i(target));
    log_handshake_info(start_time_ns, op.name(), 0, 0, buf);
    return;
  }

  // Counts how many of the handshakes this thread emitted
  // that it also executed itself.
  int emitted_handshakes_executed = 0;
  HandshakeSpinYield hsy(start_time_ns);
  while (!op.is_completed()) {
    HandshakeState::ProcessResult pr = target->handshake_state()->try_process(&op);
    if (pr == HandshakeState::_succeeded) {
      emitted_handshakes_executed++;
    }
    if (op.is_completed()) {
      break;
    }

    // Check if handshake operation has timed out
    check_handshake_timeout(start_time_ns, &op, target);

    hsy.add_result(pr);
    // Check for pending handshakes to avoid possible deadlocks where our
    // target is trying to handshake us.
    if (SafepointMechanism::should_process(self)) {
      // Will not suspend here.
      ThreadBlockInVM tbivm(self);
    }
    hsy.process();
  }

  // This pairs up with the release store in do_handshake(). It prevents future
  // loads from floating above the load of _pending_threads in is_completed()
  // and thus prevents reading stale data modified in the handshake closure
  // by the Handshakee.
  OrderAccess::acquire();

  log_handshake_info(start_time_ns, op.name(), 1, emitted_handshakes_executed);
}

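// Asynchronous handshakes do not wait for the target: the operation is queued
// on the target and the heap-allocated AsyncHandshakeOperation (and its
// closure) is deleted by the target after it has executed the operation.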
void Handshake::execute(AsyncHandshakeClosure* hs_cl, JavaThread* target) {
  jlong start_time_ns = os::javaTimeNanos();
  AsyncHandshakeOperation* op = new AsyncHandshakeOperation(hs_cl, target, start_time_ns);

  guarantee(target != nullptr, "must be");

  Thread* current = Thread::current();
  if (current != target) {
    // Another thread is handling the request and it must be protecting
    // the target.
    guarantee(Thread::is_JavaThread_protected_by_TLH(target),
              "missing ThreadsListHandle in calling context.");
  }
  // Implied else:
  // The target is handling the request itself so it can't be dead.

  target->handshake_state()->add_operation(op);
}

HandshakeState::HandshakeState(JavaThread* target) :
  _handshakee(target),
  _queue(),
  _lock(Monitor::nosafepoint, "HandshakeState_lock"),
  _active_handshaker(),
  _suspended(false),
  _async_suspend_handshake(false)
{
}

void HandshakeState::add_operation(HandshakeOperation* op) {
  // Adds are done lock free and so is arming.
  _queue.push(op);
  SafepointMechanism::arm_local_poll_release(_handshakee);
}

bool HandshakeState::operation_pending(HandshakeOperation* op) {
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  MatchOp mo(op);
  return _queue.contains(mo);
}

static bool no_suspend_filter(HandshakeOperation* op) {
  return !op->is_suspend();
}

HandshakeOperation* HandshakeState::get_op_for_self(bool allow_suspend) {
  assert(_handshakee == Thread::current(), "Must be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  if (allow_suspend) {
    return _queue.peek();
  } else {
    return _queue.peek(no_suspend_filter);
  }
}

static bool non_self_queue_filter(HandshakeOperation* op) {
  return !op->is_async();
}

bool HandshakeState::have_non_self_executable_operation() {
  assert(_handshakee != Thread::current(), "Must not be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  return _queue.contains(non_self_queue_filter);
}

bool HandshakeState::has_a_non_suspend_operation() {
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  return _queue.contains(no_suspend_filter);
}

HandshakeOperation* HandshakeState::get_op() {
  assert(_handshakee != Thread::current(), "Must not be called by self");
  assert(_lock.owned_by_self(), "Lock must be held");
  return _queue.peek(non_self_queue_filter);
};

void HandshakeState::remove_op(HandshakeOperation* op) {
  assert(_lock.owned_by_self(), "Lock must be held");
  MatchOp mo(op);
  HandshakeOperation* ret = _queue.pop(mo);
  assert(ret == op, "Popped op must match requested op");
};

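// Called by the handshakee when it polls its own handshake state. Executes
// pending operations one at a time (optionally skipping suspend operations).
// Returns true if an asynchronous operation was executed, in which case the
// caller must check for a pending safepoint before continuing.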
bool HandshakeState::process_by_self(bool allow_suspend) {
  assert(Thread::current() == _handshakee, "should call from _handshakee");
  assert(!_handshakee->is_terminated(), "should not be a terminated thread");
  assert(_handshakee->thread_state() != _thread_blocked, "should not be in a blocked state");
  assert(_handshakee->thread_state() != _thread_in_native, "should not be in native");

  ThreadInVMForHandshake tivm(_handshakee);
  // Handshakes cannot safely safepoint.
  // The exception to this rule is the asynchronous suspension handshake.
  // It by-passes the NSV by manually doing the transition.
  NoSafepointVerifier nsv;

  while (has_operation()) {
    MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);

    HandshakeOperation* op = get_op_for_self(allow_suspend);
    if (op != NULL) {
      assert(op->_target == NULL || op->_target == Thread::current(), "Wrong thread");
      bool async = op->is_async();
      log_trace(handshake)("Proc handshake %s " INTPTR_FORMAT " on " INTPTR_FORMAT " by self",
                           async ? "asynchronous" : "synchronous", p2i(op), p2i(_handshakee));
      op->prepare(_handshakee, _handshakee);
      if (!async) {
        HandleMark hm(_handshakee);
        PreserveExceptionMark pem(_handshakee);
        op->do_handshake(_handshakee); // acquire, op removed after
        remove_op(op);
      } else {
        // An asynchronous handshake may put the JavaThread in blocked state (safepoint safe).
        // The destructor ~PreserveExceptionMark touches the exception oop so it must not be executed,
        // since a safepoint may be in-progress when returning from the async handshake.
        op->do_handshake(_handshakee); // acquire, op removed after
        remove_op(op);
        log_handshake_info(((AsyncHandshakeOperation*)op)->start_time(), op->name(), 1, 0, "asynchronous");
        delete op;
        return true; // Must check for safepoints
      }
    } else {
      return false;
    }
  }
  return false;
}

bool HandshakeState::can_process_handshake() {
  // handshake_safe may only be called with polls armed.
  // Handshaker controls this by first claiming the handshake via claim_handshake().
  return SafepointSynchronize::handshake_safe(_handshakee);
}

bool HandshakeState::possibly_can_process_handshake() {
  // Note that this method is allowed to produce false positives.
  if (_handshakee->is_terminated()) {
    return true;
  }
  switch (_handshakee->thread_state()) {
  case _thread_in_native:
    // Threads in native are safe if they have no Java stack or their stack is walkable.
    return !_handshakee->has_last_Java_frame() || _handshakee->frame_anchor()->walkable();

  case _thread_blocked:
    return true;

  default:
    return false;
  }
}

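// Tries to take the per-thread handshake lock. The lock is kept only if there
// is a non-self-executable operation queued and the poll is still armed;
// otherwise it is released again and false is returned.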
bool HandshakeState::claim_handshake() {
  if (!_lock.try_lock()) {
    return false;
  }
  // Operations are added lock free and then the poll is armed.
  // If all handshake operations for the handshakee are finished and someone
  // just adds an operation we may see it here. But if the handshakee is not
  // armed yet it is not safe to proceed.
  if (have_non_self_executable_operation()) {
    OrderAccess::loadload(); // Matches the implicit storestore in add_operation()
    if (SafepointMechanism::local_poll_armed(_handshakee)) {
      return true;
    }
  }
  _lock.unlock();
  return false;
}

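// Attempts to execute a pending operation on behalf of the handshakee.
// Returns _succeeded if match_op was executed, _processed if some other
// pending operation was executed, and _no_operation, _not_safe or
// _claim_failed if nothing could be executed.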
HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* match_op) {
  if (!has_operation()) {
    // JT has already cleared its handshake
    return HandshakeState::_no_operation;
  }

  if (!possibly_can_process_handshake()) {
    // JT is observed in an unsafe state; it must notice the handshake itself.
    return HandshakeState::_not_safe;
  }

  // Claim the mutex if there is still an operation to be executed.
  if (!claim_handshake()) {
    return HandshakeState::_claim_failed;
  }

  // If we own the mutex at this point, and while owning the mutex we can
  // observe a safe state, then the thread cannot possibly continue without
  // getting caught by the mutex.
  if (!can_process_handshake()) {
    _lock.unlock();
    return HandshakeState::_not_safe;
  }

  Thread* current_thread = Thread::current();

  HandshakeOperation* op = get_op();

  assert(op != NULL, "Must have an op");
  assert(SafepointMechanism::local_poll_armed(_handshakee), "Must be");
  assert(op->_target == NULL || _handshakee == op->_target, "Wrong thread");

  log_trace(handshake)("Processing handshake " INTPTR_FORMAT " by %s(%s)", p2i(op),
                       op == match_op ? "handshaker" : "cooperative",
                       current_thread->is_VM_thread() ? "VM Thread" : "JavaThread");

  op->prepare(_handshakee, current_thread);

  set_active_handshaker(current_thread);
  op->do_handshake(_handshakee); // acquire, op removed after
  set_active_handshaker(NULL);
  remove_op(op);

  _lock.unlock();

  log_trace(handshake)("%s(" INTPTR_FORMAT ") executed an op for JavaThread: " INTPTR_FORMAT " %s target op: " INTPTR_FORMAT,
                       current_thread->is_VM_thread() ? "VM Thread" : "JavaThread",
                       p2i(current_thread), p2i(_handshakee),
                       op == match_op ? "including" : "excluding", p2i(match_op));

  return op == match_op ? HandshakeState::_succeeded : HandshakeState::_processed;
}

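// Parks the suspended handshakee: loops in _thread_blocked, waiting on the
// handshake lock, until the suspended flag is cleared by resume().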
void HandshakeState::do_self_suspend() {
  assert(Thread::current() == _handshakee, "should call from _handshakee");
  assert(_lock.owned_by_self(), "Lock must be held");
  assert(!_handshakee->has_last_Java_frame() || _handshakee->frame_anchor()->walkable(), "should have walkable stack");
  assert(_handshakee->thread_state() == _thread_blocked, "Caller should have transitioned to _thread_blocked");

  while (is_suspended()) {
    log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended", p2i(_handshakee));
    _lock.wait_without_safepoint_check();
  }
  log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " resumed", p2i(_handshakee));
}

// This is the closure that prevents a suspended JavaThread from
// escaping the suspend request.
class ThreadSelfSuspensionHandshake : public AsyncHandshakeClosure {
 public:
  ThreadSelfSuspensionHandshake() : AsyncHandshakeClosure("ThreadSelfSuspensionHandshake") {}
  void do_thread(Thread* thr) {
    JavaThread* current = JavaThread::cast(thr);
    assert(current == Thread::current(), "Must be self executed.");
    JavaThreadState jts = current->thread_state();

    current->set_thread_state(_thread_blocked);
    current->handshake_state()->do_self_suspend();
    current->set_thread_state(jts);
    current->handshake_state()->set_async_suspend_handshake(false);
  }
  virtual bool is_suspend() { return true; }
};

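// Executed on the target thread in the context of a SuspendThreadHandshake.
// Marks the target as suspended and, unless one is already pending, arms an
// asynchronous ThreadSelfSuspensionHandshake that parks the target until it
// is resumed. Returns false if the target is exiting or already suspended.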
bool HandshakeState::suspend_with_handshake() {
  assert(_handshakee->threadObj() != NULL, "cannot suspend with a NULL threadObj");
  if (_handshakee->is_exiting()) {
    log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " exiting", p2i(_handshakee));
    return false;
  }
  if (has_async_suspend_handshake()) {
    if (is_suspended()) {
      // Target is already suspended.
      log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " already suspended", p2i(_handshakee));
      return false;
    } else {
      // Target is going to wake up and leave suspension.
      // Let's just stop the thread from doing that.
      log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " re-suspended", p2i(_handshakee));
      set_suspended(true);
      return true;
    }
  }
  // No suspend request.
  assert(!is_suspended(), "cannot be suspended without a suspend request");
  // The thread is safe, so it must execute the request; thus we can count it
  // as suspended from this point.
  set_suspended(true);
  set_async_suspend_handshake(true);
  log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended, arming ThreadSuspension", p2i(_handshakee));
  ThreadSelfSuspensionHandshake* ts = new ThreadSelfSuspensionHandshake();
  Handshake::execute(ts, _handshakee);
  return true;
}

// This is the closure that synchronously honors the suspend request.
class SuspendThreadHandshake : public HandshakeClosure {
  bool _did_suspend;
 public:
  SuspendThreadHandshake() : HandshakeClosure("SuspendThread"), _did_suspend(false) {}
  void do_thread(Thread* thr) {
    JavaThread* target = JavaThread::cast(thr);
    _did_suspend = target->handshake_state()->suspend_with_handshake();
  }
  bool did_suspend() { return _did_suspend; }
};

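// Suspends the handshakee. Returns false if the target is exiting or was
// already suspended, true otherwise.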
bool HandshakeState::suspend() {
  JavaThread* self = JavaThread::current();
  if (_handshakee == self) {
    // If target is the current thread we can bypass the handshake machinery
    // and just suspend directly
    ThreadBlockInVM tbivm(self);
    MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
    set_suspended(true);
    do_self_suspend();
    return true;
  } else {
    SuspendThreadHandshake st;
    Handshake::execute(&st, _handshakee);
    return st.did_suspend();
  }
}

bool HandshakeState::resume() {
  if (!is_suspended()) {
    return false;
  }
  MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
  if (!is_suspended()) {
    assert(!_handshakee->is_suspended(), "cannot be suspended without a suspend request");
    return false;
  }
  // Resume the thread.
  set_suspended(false);
  _lock.notify();
  return true;
}