src/hotspot/share/runtime/lockStack.hpp
/*
 * Copyright (c) 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
#include "utilities/sizes.hpp"

class JavaThread;
class OopClosure;
class outputStream;
+ template<typename>
+ class GrowableArray;

class LockStack {
+   friend class LockStackTest;
  friend class VMStructs;
- private:
+   JVMCI_ONLY(friend class JVMCIVMStructs;)
+ public:
  static const int CAPACITY = 8;
+ private:

  // TODO: It would be very useful if JavaThread::lock_stack_offset() and friends were constexpr,
  // but this is currently not the case because we're using offset_of() which is non-constexpr,
  // GCC would warn about non-standard-layout types if we were using offsetof() (which *is* constexpr).
  static const int lock_stack_offset;

  // The offset of the next element, in bytes, relative to the JavaThread structure.
  // We do this instead of a simple index into the array because this allows for
  // efficient addressing in generated code.
  uint32_t _top;
+   // The _bad_oop_sentinel acts as a sentinel value to elide underflow checks in generated code.
+   // The correct layout is statically asserted in the constructor.
+   const uintptr_t _bad_oop_sentinel = badOopVal;
  oop _base[CAPACITY];

  // Get the owning thread of this lock-stack.
  inline JavaThread* get_thread() const;
  // The boundary indices of the lock-stack.
  static uint32_t start_offset();
  static uint32_t end_offset();

-   // Return true if we have room to push onto this lock-stack, false otherwise.
-   inline bool can_push() const;
+   // Returns true if the lock-stack is full. False otherwise.
+   inline bool is_full() const;

  // Pushes an oop on this lock-stack.
  inline void push(oop o);

-   // Pops an oop from this lock-stack.
-   inline oop pop();
+   // Get the oldest oop from this lock-stack.
+   // Precondition: This lock-stack must not be empty.
+   inline oop bottom() const;
+
+   // Is the lock-stack empty.
+   inline bool is_empty() const;
+
+   // Check if object is recursive.
+   // Precondition: This lock-stack must contain the oop.
+   inline bool is_recursive(oop o) const;
+
+   // Try recursive enter.
+   // Precondition: This lock-stack must not be full.
+   inline bool try_recursive_enter(oop o);
+
+   // Try recursive exit.
+   // Precondition: This lock-stack must contain the oop.
+   inline bool try_recursive_exit(oop o);

  // Removes an oop from an arbitrary location of this lock-stack.
-   inline void remove(oop o);
+   // Precondition: This lock-stack must contain the oop.
+   // Returns the number of oops removed.
+   inline size_t remove(oop o);

  // Tests whether the oop is on this lock-stack.
  inline bool contains(oop o) const;

  // GC support
  inline void oops_do(OopClosure* cl);

  // Printing
  void print_on(outputStream* st);
+
+   // Verify Lock Stack consistent with lock order
+   void verify_consistent_lock_order(GrowableArray<oop>& lock_order, bool leaf_frame) const NOT_DEBUG_RETURN;
};

#endif // SHARE_RUNTIME_LOCKSTACK_HPP
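
Two of the commented design points above are easier to see with a small example. Because _top is a byte offset from the owning JavaThread rather than an array index, compiled code can form the address of the current slot directly from the thread pointer and the loaded _top value, without first materializing the address of _base. And because _bad_oop_sentinel sits directly below _base, a "does the top slot hold o?" probe needs no explicit underflow (empty) check. The sketch below is illustrative only, not the actual lockStack.inline.hpp code: to_index() and contains_at_top() are assumed helpers introduced here for illustration, while get_thread(), is_full() and the HotSpot oopSize constant come from the surrounding code.

  // Illustrative sketch only. to_index() is an assumed helper mapping the byte
  // offset stored in _top to an index into _base; contains_at_top() is hypothetical.
  inline void LockStack::push(oop o) {
    assert(!is_full(), "precondition");
    // _top is a byte offset relative to the JavaThread, so the next free slot is
    // simply (thread address) + _top; generated code can express the same thing
    // with a single base-plus-offset addressing mode.
    oop* slot = reinterpret_cast<oop*>(reinterpret_cast<char*>(get_thread()) + _top);
    *slot = o;
    _top += oopSize;
  }

  inline bool LockStack::contains_at_top(oop o) const {
    // When the stack is empty this reads the slot just below _base, which overlays
    // _bad_oop_sentinel and never compares equal to a real oop, so the underflow
    // check is elided, as the comment on _bad_oop_sentinel describes.
    return _base[to_index(_top) - 1] == o;
  }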
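
The recursive-locking entries above only document their preconditions. A plausible reading, and it is only an assumption here, is that recursion is represented by pushing the same oop into consecutive slots, so the "enter" fast path merely compares against the current top entry. The following sketch shows that behaviour; the bodies are hypothetical, not the JDK implementation, and to_index() is the same assumed helper as in the previous sketch.

  // Illustrative sketch only: assumes recursion is encoded as consecutive duplicate
  // entries at the top of the lock-stack.
  inline bool LockStack::try_recursive_enter(oop o) {
    assert(!is_full(), "precondition");
    int top = to_index(_top);
    if (_base[top - 1] != o) {   // safe when empty: slot -1 overlays _bad_oop_sentinel
      return false;              // o is not the most recently locked object
    }
    _base[top] = o;              // record one more recursion level
    _top += oopSize;
    return true;
  }

  inline bool LockStack::try_recursive_exit(oop o) {
    assert(contains(o), "precondition");
    int top = to_index(_top);
    if (_base[top - 1] != o || _base[top - 2] != o) {
      return false;              // o is not held recursively at the top of the stack
    }
    _top -= oopSize;             // drop one recursion level, keep the original entry
    return true;
  }

Under such an encoding, the new remove() signature also makes sense: removing an object would drop every consecutive entry recorded for it, which is why remove() now reports the number of oops removed as a size_t instead of returning void.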