1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef SHARE_RUNTIME_JAVATHREAD_HPP
27 #define SHARE_RUNTIME_JAVATHREAD_HPP
28
29 #include "jni.h"
30 #include "memory/allocation.hpp"
31 #include "oops/oop.hpp"
32 #include "oops/oopHandle.hpp"
33 #include "runtime/continuationEntry.hpp"
34 #include "runtime/frame.hpp"
35 #include "runtime/globals.hpp"
36 #include "runtime/handshake.hpp"
37 #include "runtime/javaFrameAnchor.hpp"
38 #include "runtime/lockStack.hpp"
39 #include "runtime/park.hpp"
40 #include "runtime/safepointMechanism.hpp"
41 #include "runtime/stackOverflow.hpp"
42 #include "runtime/stackWatermarkSet.hpp"
43 #include "runtime/suspendResumeManager.hpp"
44 #include "runtime/thread.hpp"
45 #include "runtime/threadHeapSampler.hpp"
46 #include "runtime/threadIdentifier.hpp"
47 #include "runtime/threadStatisticalInfo.hpp"
48 #include "utilities/exceptions.hpp"
49 #include "utilities/globalDefinitions.hpp"
50 #include "utilities/macros.hpp"
51 #if INCLUDE_JFR
52 #include "jfr/support/jfrThreadExtension.hpp"
53 #include "utilities/ticks.hpp"
54 #endif
55
56 class AsyncExceptionHandshakeClosure;
57 class DeoptResourceMark;
58 class InternalOOMEMark;
59 class JNIHandleBlock;
60 class JVMCIRuntime;
61
62 class JvmtiDeferredUpdates;
63 class JvmtiSampledObjectAllocEventCollector;
64 class JvmtiThreadState;
65
66 class Metadata;
67 class ObjectMonitor;
68 class OopHandleList;
69 class OopStorage;
70 class OSThread;
71
72 class ThreadsList;
73 class ThreadSafepointState;
74 class ThreadStatistics;
75
76 class vframeArray;
77 class vframe;
78 class javaVFrame;
79
80 class JavaThread;
81 typedef void (*ThreadFunction)(JavaThread*, TRAPS);
82
83 class EventVirtualThreadPinned;
84
85 class JavaThread: public Thread {
86 friend class VMStructs;
87 friend class JVMCIVMStructs;
88 friend class WhiteBox;
89 friend class ThreadsSMRSupport; // to access _threadObj for exiting_threads_oops_do
90 friend class HandshakeState;
91 friend class Continuation;
92 friend class Threads;
93 friend class ServiceThread; // for deferred OopHandle release access
94 private:
95 bool _on_thread_list; // Is set when this JavaThread is added to the Threads list
96
97 // All references to Java objects managed via OopHandles. These
98 // have to be released by the ServiceThread after the JavaThread has
99 // terminated - see add_oop_handles_for_release().
100 OopHandle _threadObj; // The Java level thread object
101 OopHandle _vthread; // the value returned by Thread.currentThread(): the virtual thread, if mounted, otherwise _threadObj
102 OopHandle _jvmti_vthread;
103 OopHandle _scopedValueCache;
104
105 static OopStorage* _thread_oop_storage;
106
107 #ifdef ASSERT
108 private:
109 int _java_call_counter;
110
111 public:
112 int java_call_counter() { return _java_call_counter; }
113 void inc_java_call_counter() { _java_call_counter++; }
114 void dec_java_call_counter() {
115 assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
116 _java_call_counter--;
117 }
118 private: // restore original namespace restriction
119 #endif // ifdef ASSERT
120
  JavaFrameAnchor _anchor;                       // Encapsulation of the current Java frame and its state
122
123 ThreadFunction _entry_point;
124
125 JNIEnv _jni_environment;
126
127 // Deopt support
128 DeoptResourceMark* _deopt_mark; // Holds special ResourceMark for deoptimization
129
130 nmethod* _deopt_nmethod; // nmethod that is currently being deoptimized
  vframeArray*  _vframe_array_head;              // Holds the head of the list of active vframeArrays
  vframeArray*  _vframe_array_last;              // Holds the last vframeArray we popped
133 // Holds updates by JVMTI agents for compiled frames that cannot be performed immediately. They
134 // will be carried out as soon as possible which, in most cases, is just before deoptimization of
135 // the frame, when control returns to it.
136 JvmtiDeferredUpdates* _jvmti_deferred_updates;
137
138 // Handshake value for fixing 6243940. We need a place for the i2c
139 // adapter to store the callee Method*. This value is NEVER live
140 // across a gc point so it does NOT have to be gc'd
141 // The handshake is open ended since we can't be certain that it will
142 // be nulled. This is because we rarely ever see the race and end up
143 // in handle_wrong_method which is the backend of the handshake. See
144 // code in i2c adapters and handle_wrong_method.
145
146 Method* _callee_target;
147
148 // Used to pass back results to the interpreter or generated code running Java code.
149 oop _vm_result_oop; // oop result is GC-preserved
150 Metadata* _vm_result_metadata; // non-oop result
151
152 ObjectMonitor* volatile _current_pending_monitor; // ObjectMonitor this thread is waiting to lock
153 bool _current_pending_monitor_is_from_java; // locking is from Java code
154 ObjectMonitor* volatile _current_waiting_monitor; // ObjectMonitor on which this thread called Object.wait()
155
156 // Active_handles points to a block of handles
157 JNIHandleBlock* _active_handles;
158
159 // One-element thread local free list
160 JNIHandleBlock* _free_handle_block;
161
  // ID used as owner for inflated monitors. Same as the j.l.Thread.tid of the
  // current _vthread object, except during creation of the primordial thread and
  // of JNI-attached threads, where this field can hold a temporary value.
165 int64_t _monitor_owner_id;
166
167 public:
168 void set_monitor_owner_id(int64_t id) {
169 ThreadIdentifier::verify_id(id);
170 _monitor_owner_id = id;
171 }
172 int64_t monitor_owner_id() const {
173 int64_t id = _monitor_owner_id;
174 ThreadIdentifier::verify_id(id);
175 return id;
176 }
177
178 // For tracking the heavyweight monitor the thread is pending on.
179 ObjectMonitor* current_pending_monitor() {
180 // Use AtomicAccess::load() to prevent data race between concurrent modification and
181 // concurrent readers, e.g. ThreadService::get_current_contended_monitor().
182 // Especially, reloading pointer from thread after null check must be prevented.
183 return AtomicAccess::load(&_current_pending_monitor);
184 }
185 void set_current_pending_monitor(ObjectMonitor* monitor) {
186 AtomicAccess::store(&_current_pending_monitor, monitor);
187 }
188 void set_current_pending_monitor_is_from_java(bool from_java) {
189 _current_pending_monitor_is_from_java = from_java;
190 }
191 bool current_pending_monitor_is_from_java() {
192 return _current_pending_monitor_is_from_java;
193 }
194 ObjectMonitor* current_waiting_monitor() {
195 // See the comment in current_pending_monitor() above.
196 return AtomicAccess::load(&_current_waiting_monitor);
197 }
198 void set_current_waiting_monitor(ObjectMonitor* monitor) {
199 AtomicAccess::store(&_current_waiting_monitor, monitor);
200 }
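  // Illustrative sketch of the reader-side pattern the AtomicAccess::load above
  // protects (the monitoring-thread call site and the 'target' variable are
  // assumed, not a specific VM function): load the pointer once and only use
  // that local copy.
  //
  //   ObjectMonitor* m = target->current_pending_monitor();  // single atomic load
  //   if (m != nullptr) {
  //     // work with 'm'; re-reading the field here could observe nullptr
  //   }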
201
202 // JNI handle support
203 JNIHandleBlock* active_handles() const { return _active_handles; }
204 void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
205 JNIHandleBlock* free_handle_block() const { return _free_handle_block; }
206 void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }
207
208 void push_jni_handle_block();
209 void pop_jni_handle_block();
210
211 private:
212 enum SuspendFlags {
213 // NOTE: avoid using the sign-bit as cc generates different test code
214 // when the sign-bit is used, and sometimes incorrectly - see CR 6398077
215 _obj_deopt = 0x00000008U // suspend for object reallocation and relocking for JVMTI agent
216 };
217
218 // various suspension related flags - atomically updated
219 volatile uint32_t _suspend_flags;
220
221 inline void set_suspend_flag(SuspendFlags f);
222 inline void clear_suspend_flag(SuspendFlags f);
223
224 public:
225 inline void set_obj_deopt_flag();
226 inline void clear_obj_deopt_flag();
227 bool is_obj_deopt_suspend() { return (_suspend_flags & _obj_deopt) != 0; }
228
229 // Asynchronous exception support
230 private:
231 friend class InstallAsyncExceptionHandshakeClosure;
232 friend class AsyncExceptionHandshakeClosure;
233 friend class HandshakeState;
234
235 void handle_async_exception(oop java_throwable);
236 public:
237 void install_async_exception(AsyncExceptionHandshakeClosure* aec = nullptr);
238 bool has_async_exception_condition();
239 inline void set_pending_unsafe_access_error();
240 static void send_async_exception(JavaThread* jt, oop java_throwable);
241
242 class NoAsyncExceptionDeliveryMark : public StackObj {
243 friend JavaThread;
244 JavaThread *_target;
245 inline NoAsyncExceptionDeliveryMark(JavaThread *t);
246 inline ~NoAsyncExceptionDeliveryMark();
247 };
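  // Illustrative sketch of how the mark above is meant to be used (internal use
  // only: the constructor is private and JavaThread is a friend; the surrounding
  // code is assumed):
  //
  //   {
  //     NoAsyncExceptionDeliveryMark nadem(this);  // defer async exception delivery
  //     // ... code that must not observe an asynchronous exception ...
  //   }                                            // delivery possible again here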
248
249 // Safepoint support
250 public: // Expose _thread_state for SafeFetchInt()
251 volatile JavaThreadState _thread_state;
252 ThreadSafepointState* _safepoint_state; // Holds information about a thread during a safepoint
253 address _saved_exception_pc; // Saved pc of instruction where last implicit exception happened
254 NOT_PRODUCT(bool _requires_cross_modify_fence;) // State used by VerifyCrossModifyFence
255 #ifdef ASSERT
  // Debug support for checking whether code allows safepoints or not.
  // Safepoints in the VM can happen because of allocation, invoking a VM operation, blocking on
  // a mutex, or blocking on an object synchronizer (Java locking).
  // If _no_safepoint_count is non-zero, then an assertion failure will happen in any of
  // the above cases. The class NoSafepointVerifier is used to set this counter (see the
  // usage sketch below the #endif).
  int _no_safepoint_count;                             // If 0, the thread allows a safepoint to happen
262
263 public:
264 void inc_no_safepoint_count() { _no_safepoint_count++; }
265 void dec_no_safepoint_count() { _no_safepoint_count--; }
266 bool is_in_no_safepoint_scope() { return _no_safepoint_count > 0; }
267 #endif // ASSERT
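  // Usage sketch for the counter above, assuming the NoSafepointVerifier helper
  // from runtime/safepointVerifiers.hpp (effective in debug builds only):
  //
  //   {
  //     NoSafepointVerifier nsv;   // increments _no_safepoint_count
  //     // ... code that must not reach a safepoint: allocation, VM operations
  //     // ... and blocking locks here would trigger an assertion failure
  //   }                            // decremented again when nsv goes out of scope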
268 public:
  // These functions check conditions before possibly going to a safepoint,
  // including the NoSafepointVerifier count.
271 void check_for_valid_safepoint_state() NOT_DEBUG_RETURN;
272 void check_possible_safepoint() NOT_DEBUG_RETURN;
273
274 #ifdef ASSERT
275 private:
276 volatile uint64_t _visited_for_critical_count;
277
278 public:
279 void set_visited_for_critical_count(uint64_t safepoint_id) {
280 assert(_visited_for_critical_count == 0, "Must be reset before set");
281 assert((safepoint_id & 0x1) == 1, "Must be odd");
282 _visited_for_critical_count = safepoint_id;
283 }
284 void reset_visited_for_critical_count(uint64_t safepoint_id) {
285 assert(_visited_for_critical_count == safepoint_id, "Was not visited");
286 _visited_for_critical_count = 0;
287 }
288 bool was_visited_for_critical_count(uint64_t safepoint_id) const {
289 return _visited_for_critical_count == safepoint_id;
290 }
291 #endif // ASSERT
292
293 // JavaThread termination support
294 public:
295 enum TerminatedTypes {
296 _not_terminated = 0xDEAD - 3,
297 _thread_exiting, // JavaThread::exit() has been called for this thread
298 _thread_gc_barrier_detached, // thread's GC barrier has been detached
299 _thread_terminated, // JavaThread is removed from thread list
300 _vm_exited // JavaThread is still executing native code, but VM is terminated
301 // only VM_Exit can set _vm_exited
302 };
303
304 private:
305 // In general a JavaThread's _terminated field transitions as follows:
306 //
307 // _not_terminated => _thread_exiting => _thread_gc_barrier_detached => _thread_terminated
308 //
309 // _vm_exited is a special value to cover the case of a JavaThread
310 // executing native code after the VM itself is terminated.
311 //
312 // A JavaThread that fails to JNI attach has these _terminated field transitions:
313 // _not_terminated => _thread_terminated
314 //
315 volatile TerminatedTypes _terminated;
316
  jint                  _in_deopt_handler;       // count of deoptimization
                                                 // handlers this thread is in
319 volatile bool _doing_unsafe_access; // Thread may fault due to unsafe access
320 volatile bool _throwing_unsafe_access_error; // Thread has faulted and is throwing an exception
321 bool _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
322 // never locked) when throwing an exception. Used by interpreter only.
323 #if INCLUDE_JVMTI
324 volatile bool _carrier_thread_suspended; // Carrier thread is externally suspended
325 bool _is_in_VTMS_transition; // thread is in virtual thread mount state transition
326 bool _is_disable_suspend; // JVMTI suspend is temporarily disabled; used on current thread only
327 bool _is_in_java_upcall; // JVMTI is doing a Java upcall, so JVMTI events must be hidden
328 bool _VTMS_transition_mark; // used for sync between VTMS transitions and disablers
329 bool _on_monitor_waited_event; // Avoid callee arg processing for enterSpecial when posting waited event
330 ObjectMonitor* _contended_entered_monitor; // Monitor for pending monitor_contended_entered callback
331 #ifdef ASSERT
  bool _is_VTMS_transition_disabler;   // thread has currently disabled VTMS transitions
333 #endif
334 #endif
335
336 // JNI attach states:
337 enum JNIAttachStates {
338 _not_attaching_via_jni = 1, // thread is not attaching via JNI
339 _attaching_via_jni, // thread is attaching via JNI
340 _attached_via_jni // thread has attached via JNI
341 };
342
343 // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni.
344 // A native thread that is attaching via JNI starts with a value
345 // of _attaching_via_jni and transitions to _attached_via_jni.
346 volatile JNIAttachStates _jni_attach_state;
347
348 // In scope of an InternalOOMEMark?
349 bool _is_in_internal_oome_mark;
350
351 #if INCLUDE_JVMCI
352 // The _pending_* fields below are used to communicate extra information
353 // from an uncommon trap in JVMCI compiled code to the uncommon trap handler.
354
355 // Communicates the DeoptReason and DeoptAction of the uncommon trap
356 int _pending_deoptimization;
357
358 // Specifies whether the uncommon trap is to bci 0 of a synchronized method
359 // before the monitor has been acquired.
360 bool _pending_monitorenter;
361
362 // Specifies if the DeoptReason for the last uncommon trap was Reason_transfer_to_interpreter
363 bool _pending_transfer_to_interpreter;
364
365 // An id of a speculation that JVMCI compiled code can use to further describe and
366 // uniquely identify the speculative optimization guarded by an uncommon trap.
367 // See JVMCINMethodData::SPECULATION_LENGTH_BITS for further details.
368 jlong _pending_failed_speculation;
369
370 // These fields are mutually exclusive in terms of live ranges.
371 union {
372 // Communicates the pc at which the most recent implicit exception occurred
373 // from the signal handler to a deoptimization stub.
374 address _implicit_exception_pc;
375
    // Communicates an alternate call target to an i2c stub from a JavaCall.
377 address _alternate_call_target;
378 } _jvmci;
379
380 // The JVMCIRuntime in a JVMCI shared library
381 JVMCIRuntime* _libjvmci_runtime;
382
383 // Support for high precision, thread sensitive counters in JVMCI compiled code.
384 jlong* _jvmci_counters;
385
386 // Fast thread locals for use by JVMCI
387 jlong _jvmci_reserved0;
388 jlong _jvmci_reserved1;
389 oop _jvmci_reserved_oop0;
390
391 // This field is used to keep an nmethod visible to the GC so that it and its contained oops can
392 // be kept alive
393 nmethod* _live_nmethod;
394
395 public:
396 static jlong* _jvmci_old_thread_counters;
397 static void collect_counters(jlong* array, int length);
398
399 bool resize_counters(int current_size, int new_size);
400
401 static bool resize_all_jvmci_counters(int new_size);
402
403 void set_jvmci_reserved_oop0(oop value) {
404 _jvmci_reserved_oop0 = value;
405 }
406
407 oop get_jvmci_reserved_oop0() {
408 return _jvmci_reserved_oop0;
409 }
410
411 void set_jvmci_reserved0(jlong value) {
412 _jvmci_reserved0 = value;
413 }
414
415 jlong get_jvmci_reserved0() {
416 return _jvmci_reserved0;
417 }
418
419 void set_jvmci_reserved1(jlong value) {
420 _jvmci_reserved1 = value;
421 }
422
423 jlong get_jvmci_reserved1() {
424 return _jvmci_reserved1;
425 }
426
427 void set_live_nmethod(nmethod* nm) {
428 assert(_live_nmethod == nullptr, "only one");
429 _live_nmethod = nm;
430 }
431
432 void clear_live_nmethod() {
433 _live_nmethod = nullptr;
434 }
435
436 private:
437 #endif // INCLUDE_JVMCI
438
439 StackOverflow _stack_overflow_state;
440
441 void pretouch_stack();
442
  // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
  // used to temporarily pass values into and out of the runtime system during exception handling for compiled
  // code.)
446 volatile oop _exception_oop; // Exception thrown in compiled code
447 volatile address _exception_pc; // PC where exception happened
448 volatile address _exception_handler_pc; // PC for handler of exception
449
450 private:
451 // support for JNI critical regions
452 jint _jni_active_critical; // count of entries into JNI critical region
453
454 // Checked JNI: function name requires exception check
455 char* _pending_jni_exception_check_fn;
456
457 // For deadlock detection.
458 int _depth_first_number;
459
460 // JVMTI PopFrame support
  // This is set to popframe_pending to signal that the top Java frame should be popped immediately
462 int _popframe_condition;
463
464 // If reallocation of scalar replaced objects fails, we throw OOM
465 // and during exception propagation, pop the top
466 // _frames_to_pop_failed_realloc frames, the ones that reference
467 // failed reallocations.
468 int _frames_to_pop_failed_realloc;
469
470 ContinuationEntry* _cont_entry;
  intptr_t* _cont_fastpath; // the sp of the oldest known interpreted/call_stub/upcall_stub/native_wrapper
                            // frame inside the continuation
473 int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
474
475 ObjectMonitor* _unlocked_inflated_monitor;
476
477 bool _can_call_java;
478
479 // This is the field we poke in the interpreter and native
480 // wrapper (Object.wait) to check for preemption.
481 address _preempt_alternate_return;
482 // When preempting on monitorenter we could have acquired the
483 // monitor after freezing all vthread frames. In that case we
484 // set this field so that in the preempt stub we call thaw again
485 // instead of unmounting.
486 bool _preemption_cancelled;
  // For Object.wait() we set this field to know if we need to
  // throw InterruptedException at the end of thawing before returning to Java.
489 bool _pending_interrupted_exception;
490 // We allow preemption on some klass initialization calls.
491 // We use this boolean to mark such calls.
492 bool _at_preemptable_init;
493
494 public:
495 bool preemption_cancelled() { return _preemption_cancelled; }
496 void set_preemption_cancelled(bool b) { _preemption_cancelled = b; }
497
498 bool pending_interrupted_exception() { return _pending_interrupted_exception; }
499 void set_pending_interrupted_exception(bool b) { _pending_interrupted_exception = b; }
500
501 bool preempting() { return _preempt_alternate_return != nullptr; }
502 void set_preempt_alternate_return(address val) { _preempt_alternate_return = val; }
503
504 bool at_preemptable_init() { return _at_preemptable_init; }
505 void set_at_preemptable_init(bool b) { _at_preemptable_init = b; }
506
507 #ifdef ASSERT
508 // Used for extra logging with -Xlog:continuation+preempt
509 InstanceKlass* _preempt_init_klass;
510
511 InstanceKlass* preempt_init_klass() { return _preempt_init_klass; }
512 void set_preempt_init_klass(InstanceKlass* ik) { _preempt_init_klass = ik; }
513
514 int _interp_at_preemptable_vmcall_cnt;
515 int interp_at_preemptable_vmcall_cnt() { return _interp_at_preemptable_vmcall_cnt; }
516
517 bool _interp_redoing_vm_call;
  bool interp_redoing_vm_call() const { return _interp_redoing_vm_call; }
519
520 class AtRedoVMCall : public StackObj {
521 JavaThread* _thread;
522 public:
523 AtRedoVMCall(JavaThread* t) : _thread(t) {
524 assert(!_thread->_interp_redoing_vm_call, "");
525 _thread->_interp_redoing_vm_call = true;
526 _thread->_interp_at_preemptable_vmcall_cnt++;
527 assert(_thread->_interp_at_preemptable_vmcall_cnt > 0, "Unexpected count: %d",
528 _thread->_interp_at_preemptable_vmcall_cnt);
529 }
530 ~AtRedoVMCall() {
531 assert(_thread->_interp_redoing_vm_call, "");
532 _thread->_interp_redoing_vm_call = false;
533 _thread->_interp_at_preemptable_vmcall_cnt--;
534 assert(_thread->_interp_at_preemptable_vmcall_cnt >= 0, "Unexpected count: %d",
535 _thread->_interp_at_preemptable_vmcall_cnt);
536 }
537 };
538 #endif // ASSERT
539
540 private:
541 friend class VMThread;
542 friend class ThreadWaitTransition;
543 friend class VM_Exit;
544
545 // Stack watermark barriers.
546 StackWatermarks _stack_watermarks;
547
548 public:
549 inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
550
551 public:
552 // Constructor
553 JavaThread(MemTag mem_tag = mtThread); // delegating constructor
554 JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MemTag mem_tag = mtThread);
555 ~JavaThread();
556
557 // Factory method to create a new JavaThread whose attach state is "is attaching"
558 static JavaThread* create_attaching_thread();
559
560 #ifdef ASSERT
  // verify this JavaThread hasn't been published on the Threads list yet
562 void verify_not_published();
563 #endif // ASSERT
564
565 StackOverflow* stack_overflow_state() { return &_stack_overflow_state; }
566
  // JNI function table getter/setter for the JVMTI JNI function table interception API.
568 void set_jni_functions(struct JNINativeInterface_* functionTable) {
569 _jni_environment.functions = functionTable;
570 }
571 struct JNINativeInterface_* get_jni_functions() {
572 return (struct JNINativeInterface_ *)_jni_environment.functions;
573 }
574
575 // This function is called at thread creation to allow
576 // platform specific thread variables to be initialized.
577 void cache_global_variables();
578
579 // Executes Shutdown.shutdown()
580 void invoke_shutdown_hooks();
581
582 // Cleanup on thread exit
583 enum ExitType {
584 normal_exit,
585 jni_detach
586 };
587 void exit(bool destroy_vm, ExitType exit_type = normal_exit);
588
589 void cleanup_failed_attach_current_thread(bool is_daemon);
590
591 class NoJavaCodeMark : public StackObj {
592 friend JavaThread;
593 JavaThread* _target;
594 bool _orig;
595 public:
596 NoJavaCodeMark(JavaThread* t) : _target(t), _orig(t->_can_call_java) {
597 _target->_can_call_java = false;
598 }
599 ~NoJavaCodeMark() {
600 _target->_can_call_java = _orig;
601 }
602 };
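  // Illustrative sketch of using the mark above (the call site is assumed):
  //
  //   JavaThread* jt = JavaThread::current();
  //   {
  //     JavaThread::NoJavaCodeMark njcm(jt);  // can_call_java() is now false
  //     // ... VM code that must not call into Java ...
  //   }                                       // previous value restored here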
603
604 // Testers
605 virtual bool is_Java_thread() const { return true; }
606 virtual bool can_call_java() const { return _can_call_java; }
607
608 virtual bool is_active_Java_thread() const;
609
610 // Thread oop. threadObj() can be null for initial JavaThread
611 // (or for threads attached via JNI)
612 oop threadObj() const;
613 void set_threadOopHandles(oop p);
614 oop vthread() const;
615 void set_vthread(oop p);
616 oop scopedValueCache() const;
617 void set_scopedValueCache(oop p);
618 void clear_scopedValueBindings();
619 oop jvmti_vthread() const;
620 void set_jvmti_vthread(oop p);
621 oop vthread_or_thread() const;
622
623 // Prepare thread and add to priority queue. If a priority is
624 // not specified, use the priority of the thread object. Threads_lock
625 // must be held while this function is called.
626 void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);
627
628 void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; }
629 address saved_exception_pc() { return _saved_exception_pc; }
630
631 ThreadFunction entry_point() const { return _entry_point; }
632
633 // Allocates a new Java level thread object for this thread. thread_name may be null.
634 void allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS);
635
636 // Last frame anchor routines
637
638 JavaFrameAnchor* frame_anchor(void) { return &_anchor; }
639
640 // last_Java_sp
641 bool has_last_Java_frame() const { return _anchor.has_last_Java_frame(); }
642 intptr_t* last_Java_sp() const { return _anchor.last_Java_sp(); }
643
644 // last_Java_pc
645
646 address last_Java_pc(void) { return _anchor.last_Java_pc(); }
647
648 // Safepoint support
649 inline JavaThreadState thread_state() const;
650 inline void set_thread_state(JavaThreadState s);
651 inline void set_thread_state_fence(JavaThreadState s); // fence after setting thread state
652 inline ThreadSafepointState* safepoint_state() const;
653 inline void set_safepoint_state(ThreadSafepointState* state);
654 inline bool is_at_poll_safepoint();
655
656 // JavaThread termination and lifecycle support:
657 void smr_delete();
658 bool on_thread_list() const { return _on_thread_list; }
659 void set_on_thread_list() { _on_thread_list = true; }
660
661 // thread has called JavaThread::exit(), thread's GC barrier is detached
662 // or thread is terminated
663 bool is_exiting() const;
664 // thread's GC barrier is NOT detached and thread is NOT terminated
665 bool is_oop_safe() const;
666 // thread is terminated (no longer on the threads list); the thread must
667 // be protected by a ThreadsListHandle to avoid potential crashes.
668 bool check_is_terminated(TerminatedTypes l_terminated) const {
669 return l_terminated == _thread_terminated || l_terminated == _vm_exited;
670 }
671 bool is_terminated() const;
672 void set_terminated(TerminatedTypes t);
673
674 void block_if_vm_exited();
675
676 bool doing_unsafe_access() { return _doing_unsafe_access; }
677 void set_doing_unsafe_access(bool val) { _doing_unsafe_access = val; }
678
679 bool is_throwing_unsafe_access_error() { return _throwing_unsafe_access_error; }
680 void set_throwing_unsafe_access_error(bool val) { _throwing_unsafe_access_error = val; }
681
682 bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; }
683 void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
684
685 SafepointMechanism::ThreadData* poll_data() { return &_poll_data; }
686
687 static ByteSize polling_word_offset() {
688 ByteSize offset = byte_offset_of(Thread, _poll_data) +
689 byte_offset_of(SafepointMechanism::ThreadData, _polling_word);
690 // At least on x86_64, safepoint polls encode the offset as disp8 imm.
691 assert(in_bytes(offset) < 128, "Offset >= 128");
692 return offset;
693 }
694
695 static ByteSize polling_page_offset() {
696 ByteSize offset = byte_offset_of(Thread, _poll_data) +
697 byte_offset_of(SafepointMechanism::ThreadData, _polling_page);
698 // At least on x86_64, safepoint polls encode the offset as disp8 imm.
699 assert(in_bytes(offset) < 128, "Offset >= 128");
700 return offset;
701 }
702
703 void set_requires_cross_modify_fence(bool val) PRODUCT_RETURN NOT_PRODUCT({ _requires_cross_modify_fence = val; })
704
705 // Continuation support
706 ContinuationEntry* last_continuation() const { return _cont_entry; }
707 void set_cont_fastpath(intptr_t* x) { _cont_fastpath = x; }
708 void push_cont_fastpath(intptr_t* sp) { if (sp > _cont_fastpath) _cont_fastpath = sp; }
709 void set_cont_fastpath_thread_state(bool x) { _cont_fastpath_thread_state = (int)x; }
710 intptr_t* raw_cont_fastpath() const { return _cont_fastpath; }
711 bool cont_fastpath() const { return _cont_fastpath == nullptr && _cont_fastpath_thread_state != 0; }
712 bool cont_fastpath_thread_state() const { return _cont_fastpath_thread_state != 0; }
713
714 // Support for SharedRuntime::monitor_exit_helper()
715 ObjectMonitor* unlocked_inflated_monitor() const { return _unlocked_inflated_monitor; }
716 void clear_unlocked_inflated_monitor() {
717 _unlocked_inflated_monitor = nullptr;
718 }
719
720 inline bool is_vthread_mounted() const;
721 inline const ContinuationEntry* vthread_continuation() const;
722
723 private:
724 DEBUG_ONLY(void verify_frame_info();)
725
726 // Support for thread handshake operations
727 HandshakeState _handshake;
728 public:
729 HandshakeState* handshake_state() { return &_handshake; }
730
  // A JavaThread can always safely operate on itself, and other threads
  // can do so safely if they are the active handshaker.
733 bool is_handshake_safe_for(Thread* th) const {
734 return _handshake.active_handshaker() == th || this == th;
735 }
736
737 // Suspend/resume support for JavaThread
738 // higher-level suspension/resume logic called by the public APIs
739 private:
740 SuspendResumeManager _suspend_resume_manager;
741 public:
742 bool java_suspend(bool register_vthread_SR);
743 bool java_resume(bool register_vthread_SR);
744 bool is_suspended() { return _suspend_resume_manager.is_suspended(); }
745 SuspendResumeManager* suspend_resume_manager() { return &_suspend_resume_manager; }
746
747 // Check for async exception in addition to safepoint.
748 static void check_special_condition_for_native_trans(JavaThread *thread);
749
750 // Synchronize with another thread that is deoptimizing objects of the
751 // current thread, i.e. reverts optimizations based on escape analysis.
752 void wait_for_object_deoptimization();
753
754 #if INCLUDE_JVMTI
755 inline bool set_carrier_thread_suspended();
756 inline bool clear_carrier_thread_suspended();
757
758 bool is_carrier_thread_suspended() const {
759 return AtomicAccess::load(&_carrier_thread_suspended);
760 }
761
762 bool is_in_VTMS_transition() const { return _is_in_VTMS_transition; }
763 void set_is_in_VTMS_transition(bool val);
764
765 bool is_disable_suspend() const { return _is_disable_suspend; }
  void toggle_is_disable_suspend() { _is_disable_suspend = !_is_disable_suspend; }
767
768 bool is_in_java_upcall() const { return _is_in_java_upcall; }
  void toggle_is_in_java_upcall() { _is_in_java_upcall = !_is_in_java_upcall; }
770
771 bool VTMS_transition_mark() const { return AtomicAccess::load(&_VTMS_transition_mark); }
772 void set_VTMS_transition_mark(bool val) { AtomicAccess::store(&_VTMS_transition_mark, val); }
773
  // Temporarily skip posting JVMTI events for safety reasons when execution is in a critical section:
775 // - is in a VTMS transition (_is_in_VTMS_transition)
776 // - is in an interruptLock or similar critical section (_is_disable_suspend)
777 // - JVMTI is making a Java upcall (_is_in_java_upcall)
778 bool should_hide_jvmti_events() const { return _is_in_VTMS_transition || _is_disable_suspend || _is_in_java_upcall; }
779
780 bool on_monitor_waited_event() { return _on_monitor_waited_event; }
781 void set_on_monitor_waited_event(bool val) { _on_monitor_waited_event = val; }
782
783 bool pending_contended_entered_event() { return _contended_entered_monitor != nullptr; }
784 ObjectMonitor* contended_entered_monitor() { return _contended_entered_monitor; }
785 #ifdef ASSERT
786 bool is_VTMS_transition_disabler() const { return _is_VTMS_transition_disabler; }
787 void set_is_VTMS_transition_disabler(bool val);
788 #endif
789 #endif
790
791 void set_contended_entered_monitor(ObjectMonitor* val) NOT_JVMTI_RETURN JVMTI_ONLY({ _contended_entered_monitor = val; })
792
793 // Support for object deoptimization and JFR suspension
794 void handle_special_runtime_exit_condition();
795 bool has_special_runtime_exit_condition() {
796 return (_suspend_flags & _obj_deopt) != 0;
797 }
798
799 // Accessors for vframe array top
  // The linked list of vframe arrays is sorted on sp, so when we
  // unpack, the head must contain the vframe array to unpack.
802 void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
803 vframeArray* vframe_array_head() const { return _vframe_array_head; }
804
805 // Side structure for deferring update of java frame locals until deopt occurs
806 JvmtiDeferredUpdates* deferred_updates() const { return _jvmti_deferred_updates; }
807 void set_deferred_updates(JvmtiDeferredUpdates* du) { _jvmti_deferred_updates = du; }
808
809 // These only really exist to make debugging deopt problems simpler
810
811 void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
812 vframeArray* vframe_array_last() const { return _vframe_array_last; }
813
814 // The special resourceMark used during deoptimization
815
816 void set_deopt_mark(DeoptResourceMark* value) { _deopt_mark = value; }
817 DeoptResourceMark* deopt_mark(void) { return _deopt_mark; }
818
819 void set_deopt_compiled_method(nmethod* nm) { _deopt_nmethod = nm; }
820 nmethod* deopt_compiled_method() { return _deopt_nmethod; }
821
822 Method* callee_target() const { return _callee_target; }
823 void set_callee_target (Method* x) { _callee_target = x; }
824
825 // Oop results of vm runtime calls
826 oop vm_result_oop() const { return _vm_result_oop; }
827 void set_vm_result_oop(oop x) { _vm_result_oop = x; }
828
829 void set_vm_result_metadata(Metadata* x) { _vm_result_metadata = x; }
830
831 // Is thread in scope of an InternalOOMEMark?
832 bool is_in_internal_oome_mark() const { return _is_in_internal_oome_mark; }
833 void set_is_in_internal_oome_mark(bool b) { _is_in_internal_oome_mark = b; }
834
835 #if INCLUDE_JVMCI
836 jlong pending_failed_speculation() const { return _pending_failed_speculation; }
837 void set_pending_monitorenter(bool b) { _pending_monitorenter = b; }
838 void set_pending_deoptimization(int reason) { _pending_deoptimization = reason; }
839 void set_pending_failed_speculation(jlong failed_speculation) { _pending_failed_speculation = failed_speculation; }
840 void set_pending_transfer_to_interpreter(bool b) { _pending_transfer_to_interpreter = b; }
841 void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == nullptr, "must be"); _jvmci._alternate_call_target = a; }
842 void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == nullptr, "must be"); _jvmci._implicit_exception_pc = a; }
843
844 JVMCIRuntime* libjvmci_runtime() const { return _libjvmci_runtime; }
845 void set_libjvmci_runtime(JVMCIRuntime* rt) {
846 assert((_libjvmci_runtime == nullptr && rt != nullptr) || (_libjvmci_runtime != nullptr && rt == nullptr), "must be");
847 _libjvmci_runtime = rt;
848 }
849 #endif // INCLUDE_JVMCI
850
851 // Exception handling for compiled methods
852 oop exception_oop() const;
853 address exception_pc() const { return _exception_pc; }
854
855 void set_exception_oop(oop o);
856 void set_exception_pc(address a) { _exception_pc = a; }
857 void set_exception_handler_pc(address a) { _exception_handler_pc = a; }
858
859 void clear_exception_oop_and_pc() {
860 set_exception_oop(nullptr);
861 set_exception_pc(nullptr);
862 }
863
864 // Check if address is in the usable part of the stack (excludes protected
865 // guard pages). Can be applied to any thread and is an approximation for
866 // using is_in_live_stack when the query has to happen from another thread.
867 bool is_in_usable_stack(address adr) const {
868 return is_in_stack_range_incl(adr, _stack_overflow_state.stack_reserved_zone_base());
869 }
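  // Illustrative sketch (the caller, 'other_thread' and 'some_ptr' are assumed):
  // a conservative check, e.g. from error reporting code, whether an address
  // lies in another thread's usable stack:
  //
  //   address adr = (address) some_ptr;
  //   bool maybe_on_stack = other_thread->is_in_usable_stack(adr);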
870
871 // Misc. accessors/mutators
872 static ByteSize scopedValueCache_offset() { return byte_offset_of(JavaThread, _scopedValueCache); }
873
874 // For assembly stub generation
875 static ByteSize threadObj_offset() { return byte_offset_of(JavaThread, _threadObj); }
876 static ByteSize vthread_offset() { return byte_offset_of(JavaThread, _vthread); }
877 static ByteSize jni_environment_offset() { return byte_offset_of(JavaThread, _jni_environment); }
878 static ByteSize pending_jni_exception_check_fn_offset() {
879 return byte_offset_of(JavaThread, _pending_jni_exception_check_fn);
880 }
881 static ByteSize last_Java_sp_offset() {
882 return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
883 }
884 static ByteSize last_Java_pc_offset() {
885 return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
886 }
887 static ByteSize frame_anchor_offset() {
888 return byte_offset_of(JavaThread, _anchor);
889 }
890 static ByteSize callee_target_offset() { return byte_offset_of(JavaThread, _callee_target); }
891 static ByteSize vm_result_oop_offset() { return byte_offset_of(JavaThread, _vm_result_oop); }
892 static ByteSize vm_result_metadata_offset() { return byte_offset_of(JavaThread, _vm_result_metadata); }
893 static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state); }
894 static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc); }
895 static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread); }
896 #if INCLUDE_JVMCI
897 static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); }
898 static ByteSize pending_monitorenter_offset() { return byte_offset_of(JavaThread, _pending_monitorenter); }
899 static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); }
900 static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); }
901 static ByteSize jvmci_counters_offset() { return byte_offset_of(JavaThread, _jvmci_counters); }
902 #endif // INCLUDE_JVMCI
903 static ByteSize exception_oop_offset() { return byte_offset_of(JavaThread, _exception_oop); }
904 static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc); }
905 static ByteSize exception_handler_pc_offset() { return byte_offset_of(JavaThread, _exception_handler_pc); }
906
907 static ByteSize active_handles_offset() { return byte_offset_of(JavaThread, _active_handles); }
908
909 // StackOverflow offsets
910 static ByteSize stack_overflow_limit_offset() {
911 return byte_offset_of(JavaThread, _stack_overflow_state._stack_overflow_limit);
912 }
913 static ByteSize stack_guard_state_offset() {
914 return byte_offset_of(JavaThread, _stack_overflow_state._stack_guard_state);
915 }
916 static ByteSize reserved_stack_activation_offset() {
917 return byte_offset_of(JavaThread, _stack_overflow_state._reserved_stack_activation);
918 }
919 static ByteSize shadow_zone_safe_limit() {
920 return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_safe_limit);
921 }
922 static ByteSize shadow_zone_growth_watermark() {
923 return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
924 }
925
926 static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); }
927
928 static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
929 static ByteSize should_post_on_exceptions_flag_offset() {
930 return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
931 }
932 static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
933 NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset() { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
934
935 static ByteSize monitor_owner_id_offset() { return byte_offset_of(JavaThread, _monitor_owner_id); }
936
937 static ByteSize cont_entry_offset() { return byte_offset_of(JavaThread, _cont_entry); }
938 static ByteSize cont_fastpath_offset() { return byte_offset_of(JavaThread, _cont_fastpath); }
939 static ByteSize preemption_cancelled_offset() { return byte_offset_of(JavaThread, _preemption_cancelled); }
940 static ByteSize preempt_alternate_return_offset() { return byte_offset_of(JavaThread, _preempt_alternate_return); }
941 DEBUG_ONLY(static ByteSize interp_at_preemptable_vmcall_cnt_offset() { return byte_offset_of(JavaThread, _interp_at_preemptable_vmcall_cnt); })
942 static ByteSize unlocked_inflated_monitor_offset() { return byte_offset_of(JavaThread, _unlocked_inflated_monitor); }
943
944 #if INCLUDE_JVMTI
945 static ByteSize is_in_VTMS_transition_offset() { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
946 static ByteSize is_disable_suspend_offset() { return byte_offset_of(JavaThread, _is_disable_suspend); }
947 #endif
948
949 // Returns the jni environment for this thread
950 JNIEnv* jni_environment() { return &_jni_environment; }
951
952 // Returns the current thread as indicated by the given JNIEnv.
953 // We don't assert it is Thread::current here as that is done at the
954 // external JNI entry points where the JNIEnv is passed into the VM.
955 static JavaThread* thread_from_jni_environment(JNIEnv* env) {
956 JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
957 // We can't normally get here in a thread that has completed its
958 // execution and so "is_terminated", except when the call is from
959 // AsyncGetCallTrace, which can be triggered by a signal at any point in
960 // a thread's lifecycle. A thread is also considered terminated if the VM
961 // has exited, so we have to check this and block in case this is a daemon
962 // thread returning to the VM (the JNI DirectBuffer entry points rely on
963 // this).
964 if (current->is_terminated()) {
965 current->block_if_vm_exited();
966 }
967 return current;
968 }
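  // Illustrative sketch of the pointer arithmetic this relies on: the JNIEnv is
  // embedded in the JavaThread, so converting back is a fixed-offset subtraction.
  //
  //   JavaThread* jt  = JavaThread::current();
  //   JNIEnv*     env = jt->jni_environment();               // &jt->_jni_environment
  //   assert(JavaThread::thread_from_jni_environment(env) == jt, "round trip");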
969
970 // JNI critical regions. These can nest.
971 bool in_critical() { return _jni_active_critical > 0; }
972 bool in_last_critical() { return _jni_active_critical == 1; }
973 inline void enter_critical();
974 void exit_critical() {
975 assert(Thread::current() == this, "this must be current thread");
976 _jni_active_critical--;
977 assert(_jni_active_critical >= 0, "JNI critical nesting problem?");
978 }
979
980 // Atomic version; invoked by a thread other than the owning thread.
981 bool in_critical_atomic() { return AtomicAccess::load(&_jni_active_critical) > 0; }
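  // Illustrative sketch of the nesting above ('jt' is assumed to be the current
  // thread, e.g. inside two overlapping GetPrimitiveArrayCritical regions):
  //
  //   jt->enter_critical();   // depth 1: in_critical() is true
  //   jt->enter_critical();   // depth 2
  //   jt->exit_critical();    // depth 1: in_last_critical() is true again
  //   jt->exit_critical();    // depth 0: no longer in a critical region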
982
  // Checked JNI: is the programmer required to check for exceptions? If so, specify
  // which function name. Returning to a Java frame should implicitly clear the
  // pending check; this is done for Native->Java transitions (i.e. user JNI code).
  // VM->Java transitions are not cleared; it is expected that JNI code enclosed
  // within ThreadToNativeFromVM makes proper exception checks (i.e. VM internal).
988 bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != nullptr; }
989 void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = nullptr; }
990 const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; }
991 void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; }
992
993 // For deadlock detection
994 int depth_first_number() { return _depth_first_number; }
995 void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
996
997 public:
998 bool in_deopt_handler() const { return _in_deopt_handler > 0; }
999 void inc_in_deopt_handler() { _in_deopt_handler++; }
1000 void dec_in_deopt_handler() {
1001 assert(_in_deopt_handler > 0, "mismatched deopt nesting");
1002 if (_in_deopt_handler > 0) { // robustness
1003 _in_deopt_handler--;
1004 }
1005 }
1006
1007 private:
1008 void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; }
1009
1010 // factor out low-level mechanics for use in both normal and error cases
1011 const char* get_thread_name_string(char* buf = nullptr, int buflen = 0) const;
1012
1013 public:
1014
1015 // Frame iteration; calls the function f for all frames on the stack
1016 void frames_do(void f(frame*, const RegisterMap*));
1017
1018 // Memory operations
1019 void oops_do_frames(OopClosure* f, NMethodClosure* cf);
1020 void oops_do_no_frames(OopClosure* f, NMethodClosure* cf);
1021
1022 // GC operations
1023 virtual void nmethods_do(NMethodClosure* cf);
1024
1025 // RedefineClasses Support
1026 void metadata_do(MetadataClosure* f);
1027
1028 // Debug method asserting thread states are correct during a handshake operation.
1029 DEBUG_ONLY(void verify_states_for_handshake();)
1030
1031 // Misc. operations
1032 const char* name() const;
1033 const char* name_raw() const;
1034 const char* type_name() const { return "JavaThread"; }
1035 static const char* name_for(oop thread_obj);
1036
1037 void print_on(outputStream* st, bool print_extended_info) const;
1038 void print_on(outputStream* st) const { print_on(st, false); }
1039 void print() const;
1040 void print_thread_state_on(outputStream*) const;
1041 void print_on_error(outputStream* st, char* buf, int buflen) const;
1042 void print_name_on_error(outputStream* st, char* buf, int buflen) const;
1043 void verify();
1044
1045 // Accessing frames
1046 frame last_frame() {
1047 _anchor.make_walkable();
1048 return pd_last_frame();
1049 }
1050 javaVFrame* last_java_vframe(RegisterMap* reg_map) { return last_java_vframe(last_frame(), reg_map); }
1051
1052 frame carrier_last_frame(RegisterMap* reg_map);
1053 javaVFrame* carrier_last_java_vframe(RegisterMap* reg_map) { return last_java_vframe(carrier_last_frame(reg_map), reg_map); }
1054
1055 frame vthread_last_frame();
1056 javaVFrame* vthread_last_java_vframe(RegisterMap* reg_map) { return last_java_vframe(vthread_last_frame(), reg_map); }
1057
1058 frame platform_thread_last_frame(RegisterMap* reg_map);
1059 javaVFrame* platform_thread_last_java_vframe(RegisterMap* reg_map) {
1060 return last_java_vframe(platform_thread_last_frame(reg_map), reg_map);
1061 }
1062
1063 javaVFrame* last_java_vframe(const frame f, RegisterMap* reg_map);
1064
1065 // Returns method at 'depth' java or native frames down the stack
1066 // Used for security checks
1067 Klass* security_get_caller_class(int depth);
1068
1069 // Print stack trace in external format
1070 // These variants print carrier/platform thread information only.
1071 void print_stack_on(outputStream* st);
1072 void print_stack() { print_stack_on(tty); }
1073 // This prints the currently mounted virtual thread.
1074 void print_vthread_stack_on(outputStream* st);
1075 // This prints the active stack: either carrier/platform or virtual.
1076 void print_active_stack_on(outputStream* st);
1077 // Print current stack trace for checked JNI warnings and JNI fatal errors.
1078 // This is the external format from above, but selecting the platform
1079 // or vthread as applicable.
1080 void print_jni_stack();
1081
1082 // Print stack traces in various internal formats
1083 void trace_stack() PRODUCT_RETURN;
1084 void trace_stack_from(vframe* start_vf) PRODUCT_RETURN;
1085 void trace_frames() PRODUCT_RETURN;
1086
1087 // Print an annotated view of the stack frames
1088 void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN;
1089 void validate_frame_layout() {
1090 print_frame_layout(0, true);
1091 }
1092
1093 // Function for testing deoptimization
1094 void deoptimize();
1095 void make_zombies();
1096
1097 void deoptimize_marked_methods();
1098
1099 public:
1100 // Returns the running thread as a JavaThread
1101 static JavaThread* current() {
1102 return JavaThread::cast(Thread::current());
1103 }
1104
1105 // Returns the current thread as a JavaThread, or nullptr if not attached
1106 static inline JavaThread* current_or_null();
1107
1108 // Casts
1109 static JavaThread* cast(Thread* t) {
1110 assert(t->is_Java_thread(), "incorrect cast to JavaThread");
1111 return static_cast<JavaThread*>(t);
1112 }
1113
1114 static const JavaThread* cast(const Thread* t) {
1115 assert(t->is_Java_thread(), "incorrect cast to const JavaThread");
1116 return static_cast<const JavaThread*>(t);
1117 }
1118
1119 // Returns the active Java thread. Do not use this if you know you are calling
1120 // from a JavaThread, as it's slower than JavaThread::current. If called from
1121 // the VMThread, it also returns the JavaThread that instigated the VMThread's
1122 // operation. You may not want that either.
1123 static JavaThread* active();
1124
1125 protected:
1126 virtual void pre_run();
1127 virtual void run();
1128 void thread_main_inner();
1129 virtual void post_run();
1130
1131 public:
1132 // Thread local information maintained by JVMTI.
1133 void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; }
1134 // A JvmtiThreadState is lazily allocated. This jvmti_thread_state()
1135 // getter is used to get this JavaThread's JvmtiThreadState if it has
1136 // one which means null can be returned. JvmtiThreadState::state_for()
1137 // is used to get the specified JavaThread's JvmtiThreadState if it has
1138 // one or it allocates a new JvmtiThreadState for the JavaThread and
1139 // returns it. JvmtiThreadState::state_for() will return null only if
1140 // the specified JavaThread is exiting.
1141 JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; }
1142 static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); }
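  // Illustrative sketch of the two access paths described above (the JavaThread*
  // variable 'jt' is assumed):
  //
  //   JvmtiThreadState* s1 = jt->jvmti_thread_state();         // may be null
  //   JvmtiThreadState* s2 = JvmtiThreadState::state_for(jt);  // allocates on demand;
  //                                                            // null only if jt is exiting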
1143
1144 #if INCLUDE_JVMTI
1145 // Rebind JVMTI thread state from carrier to virtual or from virtual to carrier.
1146 JvmtiThreadState *rebind_to_jvmti_thread_state_of(oop thread_oop);
1147 #endif
1148
1149 // JVMTI PopFrame support
1150 // Setting and clearing popframe_condition
  // All of these enumerated values are bits. popframe_pending
  // indicates that a PopFrame() has been requested and has not yet been
  // completed. popframe_processing indicates that the PopFrame() is in
  // the process of being completed. popframe_force_deopt_reexecution_bit
  // indicates that special handling is required when returning to a
  // deoptimized caller.
1157 enum PopCondition {
1158 popframe_inactive = 0x00,
1159 popframe_pending_bit = 0x01,
1160 popframe_processing_bit = 0x02,
1161 popframe_force_deopt_reexecution_bit = 0x04
1162 };
1163 PopCondition popframe_condition() { return (PopCondition) _popframe_condition; }
1164 void set_popframe_condition(PopCondition c) { _popframe_condition = c; }
1165 void set_popframe_condition_bit(PopCondition c) { _popframe_condition |= c; }
1166 void clear_popframe_condition() { _popframe_condition = popframe_inactive; }
1167 static ByteSize popframe_condition_offset() { return byte_offset_of(JavaThread, _popframe_condition); }
1168 bool has_pending_popframe() { return (popframe_condition() & popframe_pending_bit) != 0; }
1169 bool popframe_forcing_deopt_reexecution() { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; }
1170
1171 bool pop_frame_in_process(void) { return ((_popframe_condition & popframe_processing_bit) != 0); }
1172 void set_pop_frame_in_process(void) { _popframe_condition |= popframe_processing_bit; }
1173 void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; }
1174
1175 int frames_to_pop_failed_realloc() const { return _frames_to_pop_failed_realloc; }
1176 void set_frames_to_pop_failed_realloc(int nb) { _frames_to_pop_failed_realloc = nb; }
1177 void dec_frames_to_pop_failed_realloc() { _frames_to_pop_failed_realloc--; }
1178
1179 private:
1180 // Saved incoming arguments to popped frame.
1181 // Used only when popped interpreted frame returns to deoptimized frame.
1182 void* _popframe_preserved_args;
1183 int _popframe_preserved_args_size;
1184
1185 public:
1186 void popframe_preserve_args(ByteSize size_in_bytes, void* start);
1187 void* popframe_preserved_args();
1188 ByteSize popframe_preserved_args_size();
1189 WordSize popframe_preserved_args_size_in_words();
1190 void popframe_free_preserved_args();
1191
1192
1193 private:
1194 JvmtiThreadState *_jvmti_thread_state;
1195
1196 // Used by the interpreter in fullspeed mode for frame pop, method
1197 // entry, method exit and single stepping support. This field is
1198 // only set to non-zero at a safepoint or using a direct handshake
1199 // (see EnterInterpOnlyModeHandshakeClosure).
  // It can be set to zero asynchronously to this thread's execution (i.e., without
  // a safepoint/handshake or a lock) so we have to be very careful.
1202 // Accesses by other threads are synchronized using JvmtiThreadState_lock though.
1203 // This field is checked by the interpreter which expects it to be an integer.
1204 int _interp_only_mode;
1205
1206 public:
1207 // used by the interpreter for fullspeed debugging support (see above)
1208 static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); }
1209 bool is_interp_only_mode() { return (_interp_only_mode != 0); }
1210 void set_interp_only_mode(bool val) { _interp_only_mode = val ? 1 : 0; }
1211
1212 // support for cached flag that indicates whether exceptions need to be posted for this thread
1213 // if this is false, we can avoid deoptimizing when events are thrown
1214 // this gets set to reflect whether jvmtiExport::post_exception_throw would actually do anything
1215 private:
1216 int _should_post_on_exceptions_flag;
1217
1218 public:
1219 void set_should_post_on_exceptions_flag(int val) { _should_post_on_exceptions_flag = val; }
1220
1221 private:
1222 ThreadStatistics *_thread_stat;
1223
1224 public:
1225 ThreadStatistics* get_thread_stat() const { return _thread_stat; }
1226
1227 // Return a blocker object for which this thread is blocked parking.
1228 oop current_park_blocker();
1229
1230 private:
1231 static size_t _stack_size_at_create;
1232
1233 public:
1234 static inline size_t stack_size_at_create(void) {
1235 return _stack_size_at_create;
1236 }
1237 static inline void set_stack_size_at_create(size_t value) {
1238 _stack_size_at_create = value;
1239 }
1240
1241 // Machine dependent stuff
1242 #include OS_CPU_HEADER(javaThread)
1243
1244 // JSR166 per-thread parker
1245 private:
1246 Parker _parker;
1247 public:
1248 Parker* parker() { return &_parker; }
1249
1250 public:
1251 // clearing/querying jni attach status
1252 bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; }
1253 bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; }
1254 inline void set_done_attaching_via_jni();
1255
1256 // Stack dump assistance:
1257 // Track the class we want to initialize but for which we have to wait
1258 // on its init_lock() because it is already being initialized.
1259 void set_class_to_be_initialized(InstanceKlass* k);
1260 InstanceKlass* class_to_be_initialized() const;
1261
1262 // The most recent active <clinit> invocation is tracked by this variable.
1263 // The setter returns the previous value, so it can be restored later if needed.
1264 InstanceKlass* set_class_being_initialized(InstanceKlass* k);
1265 InstanceKlass* class_being_initialized() const;
1266
1267 private:
1268 InstanceKlass* _class_to_be_initialized;
1269 InstanceKlass* _class_being_initialized;
1270
1271 // java.lang.Thread.sleep support
1272 ParkEvent * _SleepEvent;
1273
1274 #if INCLUDE_JFR
1275 // Support for jdk.VirtualThreadPinned event
1276 freeze_result _last_freeze_fail_result;
1277 Ticks _last_freeze_fail_time;
1278 #endif
1279
1280 public:
1281 bool sleep(jlong millis);
1282 bool sleep_nanos(jlong nanos);
1283
1284 // java.lang.Thread interruption support
1285 void interrupt();
1286 bool is_interrupted(bool clear_interrupted);
1287
1288 #if INCLUDE_JFR
1289 // Support for jdk.VirtualThreadPinned event
1290 freeze_result last_freeze_fail_result() { return _last_freeze_fail_result; }
1291 Ticks& last_freeze_fail_time() { return _last_freeze_fail_time; }
1292 void set_last_freeze_fail_result(freeze_result result);
1293 #endif
1294 void post_vthread_pinned_event(EventVirtualThreadPinned* event, const char* op, freeze_result result) NOT_JFR_RETURN();
1295
1296
1297 // This is only for use by JVMTI RawMonitorWait. It emulates the actions of
1298 // the Java code in Object::wait which are not present in RawMonitorWait.
1299 bool get_and_clear_interrupted();
1300
1301 private:
1302 LockStack _lock_stack;
1303 OMCache _om_cache;
1304
1305 public:
1306 LockStack& lock_stack() { return _lock_stack; }
1307
1308 static ByteSize lock_stack_offset() { return byte_offset_of(JavaThread, _lock_stack); }
1309 // Those offsets are used in code generators to access the LockStack that is embedded in this
1310 // JavaThread structure. Those accesses are relative to the current thread, which
1311 // is typically in a dedicated register.
1312 static ByteSize lock_stack_top_offset() { return lock_stack_offset() + LockStack::top_offset(); }
1313 static ByteSize lock_stack_base_offset() { return lock_stack_offset() + LockStack::base_offset(); }
1314
1315 static ByteSize om_cache_offset() { return byte_offset_of(JavaThread, _om_cache); }
1316 static ByteSize om_cache_oops_offset() { return om_cache_offset() + OMCache::entries_offset(); }
1317
1318 void om_set_monitor_cache(ObjectMonitor* monitor);
1319 void om_clear_monitor_cache();
1320 ObjectMonitor* om_get_from_monitor_cache(oop obj);
1321
1322 static OopStorage* thread_oop_storage();
1323
1324 static void verify_cross_modify_fence_failure(JavaThread *thread) PRODUCT_RETURN;
1325
1326 // Helper function to create the java.lang.Thread object for a
1327 // VM-internal thread. The thread will have the given name and be
1328 // part of the System ThreadGroup.
1329 static Handle create_system_thread_object(const char* name, TRAPS);
1330
1331 // Helper function to start a VM-internal daemon thread.
1332 // E.g. ServiceThread, NotificationThread, CompilerThread etc.
1333 static void start_internal_daemon(JavaThread* current, JavaThread* target,
1334 Handle thread_oop, ThreadPriority prio);
1335
1336 // Helper function to do vm_exit_on_initialization for osthread
1337 // resource allocation failure.
1338 static void vm_exit_on_osthread_failure(JavaThread* thread);
1339
1340 // Deferred OopHandle release support
1341 private:
1342 // List of OopHandles to be released - guarded by the Service_lock.
1343 static OopHandleList* _oop_handle_list;
1344 // Add our OopHandles to the list for the service thread to release.
1345 void add_oop_handles_for_release();
1346 // Called by the ServiceThread to release the OopHandles.
1347 static void release_oop_handles();
1348 // Called by the ServiceThread to poll if there are any OopHandles to release.
1349 // Called when holding the Service_lock.
1350 static bool has_oop_handles_to_release() {
1351 return _oop_handle_list != nullptr;
1352 }
1353 };
1354
1355 inline JavaThread* JavaThread::current_or_null() {
1356 Thread* current = Thread::current_or_null();
1357 return current != nullptr ? JavaThread::cast(current) : nullptr;
1358 }
1359
1360 class UnlockFlagSaver {
1361 private:
1362 JavaThread* _thread;
1363 bool _do_not_unlock;
1364 public:
1365 UnlockFlagSaver(JavaThread* t) {
1366 _thread = t;
1367 _do_not_unlock = t->do_not_unlock_if_synchronized();
1368 t->set_do_not_unlock_if_synchronized(false);
1369 }
1370 ~UnlockFlagSaver() {
1371 _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1372 }
1373 };
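// Illustrative sketch (the runtime-entry call site is assumed): save and clear the
// do-not-unlock flag around code that may unwind synchronized interpreter frames.
//
//   JavaThread* current = JavaThread::current();
//   {
//     UnlockFlagSaver ufs(current);   // flag saved and cleared
//     // ... call that may throw and unwind ...
//   }                                 // destructor restores the saved flag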
1374
1375 class JNIHandleMark : public StackObj {
1376 JavaThread* _thread;
1377 public:
1378 JNIHandleMark(JavaThread* thread) : _thread(thread) {
1379 thread->push_jni_handle_block();
1380 }
1381 ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1382 };
1383
1384 class NoPreemptMark {
1385 ContinuationEntry* _ce;
1386 bool _unpin;
1387 public:
1388 NoPreemptMark(JavaThread* thread, bool ignore_mark = false) : _ce(thread->last_continuation()), _unpin(false) {
1389 if (_ce != nullptr && !ignore_mark) _unpin = _ce->pin();
1390 }
1391 ~NoPreemptMark() { if (_unpin) _ce->unpin(); }
1392 };
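// Illustrative sketch (the call site is assumed): keep the currently mounted
// continuation, if any, from being preempted across a VM-side operation.
//
//   {
//     NoPreemptMark npm(JavaThread::current());  // pins the last continuation
//     // ... code during which the virtual thread must stay mounted ...
//   }                                            // unpinned when npm is destroyed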
1393
1394 class ThreadOnMonitorWaitedEvent {
1395 JavaThread* _thread;
1396 public:
1397 ThreadOnMonitorWaitedEvent(JavaThread* thread) : _thread(thread) {
1398 JVMTI_ONLY(_thread->set_on_monitor_waited_event(true);)
1399 }
1400 ~ThreadOnMonitorWaitedEvent() { JVMTI_ONLY(_thread->set_on_monitor_waited_event(false);) }
1401 };
1402
1403 class ThreadInClassInitializer : public StackObj {
1404 JavaThread* _thread;
1405 InstanceKlass* _previous;
1406 public:
1407 ThreadInClassInitializer(JavaThread* thread, InstanceKlass* ik) : _thread(thread) {
1408 _previous = _thread->class_being_initialized();
1409 _thread->set_class_being_initialized(ik);
1410 }
1411 ~ThreadInClassInitializer() {
1412 _thread->set_class_being_initialized(_previous);
1413 }
1414 };
1415
1416 class ThrowingUnsafeAccessError : public StackObj {
1417 JavaThread* _thread;
1418 bool _prev;
1419 public:
1420 ThrowingUnsafeAccessError(JavaThread* thread) :
1421 _thread(thread),
1422 _prev(thread->is_throwing_unsafe_access_error()) {
1423 _thread->set_throwing_unsafe_access_error(true);
1424 }
1425 ~ThrowingUnsafeAccessError() {
1426 _thread->set_throwing_unsafe_access_error(_prev);
1427 }
1428 };
1429
1430 #endif // SHARE_RUNTIME_JAVATHREAD_HPP