src/hotspot/share/oops/stackChunkOop.inline.hpp
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
+ #include "runtime/objectMonitor.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER_INLINE(stackChunkOop)
inline void stackChunkOopDesc::set_max_thawing_size(int value) {
assert(value >= 0, "size must be >= 0");
jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
}
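+ // Number of oops copied from the owner thread's lock stack into this chunk;
+ // they are stored at the start of the chunk's stack area (see iterate_lockstack()).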
+ inline uint8_t stackChunkOopDesc::lockstack_size() const { return jdk_internal_vm_StackChunk::lockStackSize(as_oop()); }
+ inline void stackChunkOopDesc::set_lockstack_size(uint8_t value) { jdk_internal_vm_StackChunk::set_lockStackSize(this, value); }
+
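+ // ObjectWaiter the virtual thread was enqueued on when it was preempted while
+ // blocked on a monitor operation; nullptr when the thread was not blocked.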
+ inline ObjectWaiter* stackChunkOopDesc::object_waiter() const { return (ObjectWaiter*)jdk_internal_vm_StackChunk::objectWaiter(as_oop()); }
+ inline void stackChunkOopDesc::set_object_waiter(ObjectWaiter* obj) { jdk_internal_vm_StackChunk::set_objectWaiter(this, (address)obj); }
+
inline oop stackChunkOopDesc::cont() const {
if (UseZGC && !ZGenerational) {
assert(!UseCompressedOops, "Non-generational ZGC does not support compressed oops");
// The state of the cont oop is used by XCollectedHeap::requires_barriers to
// determine the age of the stackChunkOopDesc, so only the GC may apply a load
// barrier to the oop itself; load a stack-local copy and barrier that instead.
oop obj = jdk_internal_vm_StackChunk::cont_raw<oop>(as_oop());
obj = (oop)NativeAccess<>::oop_load(&obj);
return obj;
}
return jdk_internal_vm_StackChunk::cont(as_oop());
}
inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
uint32_t flags = this->flags();
set_flags((uint8_t)(value ? flags |= flag : flags &= ~flag));
}
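+ // Clears only the bits in 'flag'; other flags are left untouched.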
+ inline void stackChunkOopDesc::clear_flags(uint8_t flag) {
+ uint32_t flags = this->flags();
+ set_flags((uint8_t)(flags &= ~flag));
+ }
inline void stackChunkOopDesc::clear_flags() {
set_flags(0);
}
inline bool stackChunkOopDesc::has_mixed_frames() const { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
inline void stackChunkOopDesc::set_has_mixed_frames(bool value) {
- assert((flags() & ~FLAG_HAS_INTERPRETED_FRAMES) == 0, "other flags should not be set");
+ assert((flags() & ~(FLAG_HAS_INTERPRETED_FRAMES | FLAGS_PREEMPTED)) == 0, "other flags should not be set");
set_flag(FLAG_HAS_INTERPRETED_FRAMES, value);
}
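+ // Exactly one FLAG_PREEMPTED_* bit records why the continuation was preempted:
+ // blocked on monitorenter, or waiting in Object.wait(). It stays set until
+ // get_and_clear_preempt_kind() consumes it.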
+ inline void stackChunkOopDesc::set_preempt_kind(int freeze_kind) {
+ assert((flags() & FLAGS_PREEMPTED) == 0, "");
+ assert(freeze_kind == freeze_on_monitorenter || freeze_kind == freeze_on_wait, "");
+ uint8_t flag = freeze_kind == freeze_on_monitorenter ? FLAG_PREEMPTED_MONITORENTER : FLAG_PREEMPTED_WAIT;
+ set_flag(flag, true);
+ }
+
+ inline int stackChunkOopDesc::get_and_clear_preempt_kind() {
+ assert((is_flag(FLAG_PREEMPTED_MONITORENTER) && !is_flag(FLAG_PREEMPTED_WAIT))
+ || (is_flag(FLAG_PREEMPTED_WAIT) && !is_flag(FLAG_PREEMPTED_MONITORENTER)), "");
+ int kind = is_flag(FLAG_PREEMPTED_MONITORENTER) ? freeze_on_monitorenter : freeze_on_wait;
+ clear_flags(FLAGS_PREEMPTED);
+ return kind;
+ }
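+ // Usage sketch (hypothetical caller; presumably run when the chunk is thawed):
+ //   int kind = chunk->get_and_clear_preempt_kind();
+ //   if (kind == freeze_on_monitorenter) { /* finish acquiring the monitor */ }
+ //   else                                { /* kind == freeze_on_wait: finish Object.wait() */ }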
+
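+ // Monitor the chunk's virtual thread is blocked entering: either a plain
+ // monitorenter, or the re-enter after being notified (or timing out) in Object.wait().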
+ inline ObjectMonitor* stackChunkOopDesc::current_pending_monitor() const {
+ ObjectWaiter* waiter = object_waiter();
+ if (waiter != nullptr && (waiter->is_monitorenter() || (waiter->is_wait() && (waiter->at_reenter() || waiter->notified())))) {
+ return waiter->monitor();
+ }
+ return nullptr;
+ }
+
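+ // Monitor the chunk's virtual thread is waiting on in Object.wait(), if any.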
+ inline ObjectMonitor* stackChunkOopDesc::current_waiting_monitor() const {
+ ObjectWaiter* waiter = object_waiter();
+ return waiter != nullptr && waiter->is_wait() ? waiter->monitor() : nullptr;
+ }
+
+ inline bool stackChunkOopDesc::has_lockstack() const { return is_flag(FLAG_HAS_LOCKSTACK); }
+ inline void stackChunkOopDesc::set_has_lockstack(bool value) { set_flag(FLAG_HAS_LOCKSTACK, value); }
+
inline bool stackChunkOopDesc::is_gc_mode() const { return is_flag(FLAG_GC_MODE); }
inline bool stackChunkOopDesc::is_gc_mode_acquire() const { return is_flag_acquire(FLAG_GC_MODE); }
inline void stackChunkOopDesc::set_gc_mode(bool value) { set_flag(FLAG_GC_MODE, value); }
inline bool stackChunkOopDesc::has_bitmap() const { return is_flag(FLAG_HAS_BITMAP); }
f.handle_deopted();
}
do_barriers0<barrier>(f, map);
}
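+ // Applies the closure to the lockstack_size() oops saved at the start of the
+ // chunk's stack area; OopT is oop or narrowOop to match how they were stored.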
+ template <typename OopT, class StackChunkLockStackClosureType>
+ inline void stackChunkOopDesc::iterate_lockstack(StackChunkLockStackClosureType* closure) {
+ assert(LockingMode == LM_LIGHTWEIGHT, "");
+ int cnt = lockstack_size();
+ intptr_t* lockstart_addr = start_address();
+ for (int i = 0; i < cnt; i++) {
+ closure->do_oop((OopT*)&lockstart_addr[i]);
+ }
+ }
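+ // Usage sketch (hypothetical closure 'cl'; assumes the oops were stored in the
+ // heap's oop encoding):
+ //   if (chunk->has_lockstack()) {
+ //     if (UseCompressedOops) chunk->iterate_lockstack<narrowOop>(&cl);
+ //     else                   chunk->iterate_lockstack<oop>(&cl);
+ //   }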
+
template <class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
has_mixed_frames() ? iterate_stack<ChunkFrames::Mixed>(closure)
: iterate_stack<ChunkFrames::CompiledOnly>(closure);
}
RegisterMap full_map(nullptr,
RegisterMap::UpdateMap::include,
RegisterMap::ProcessFrames::skip,
RegisterMap::WalkContinuation::include);
full_map.set_include_argument_oops(false);
+ closure->do_frame(f, map);
f.next(&full_map);
- assert(!f.is_done(), "");
- assert(f.is_compiled(), "");
+ if (f.is_done()) return;
should_continue = closure->do_frame(f, &full_map);
- f.next(map);
- f.handle_deopted(); // the stub caller might be deoptimized (as it's not at a call)
+ f.next(&map);
}
assert(!f.is_stub(), "");
for (; should_continue && !f.is_done(); f.next(map)) {
if (frame_kind == ChunkFrames::Mixed) {
inline MemRegion stackChunkOopDesc::range() {
return MemRegion((HeapWord*)this, size());
}
inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
- assert(fr.is_compiled_frame() || fr.cb()->is_safepoint_stub(), "");
+ assert(fr.is_compiled_frame() || fr.cb()->is_runtime_stub(), "");
assert(is_in_chunk(fr.unextended_sp()), "");
intptr_t* base = fr.real_fp(); // equal to the caller's sp
intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
assert(base > loc, "");
return (int)(base - loc);
}