#ifdef ASSERT
private:
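  // Debug-only integrity check: verify_cookie() below asserts that the cookie word still
  // holds COOKIE_VALUE, which catches an entry that was overwritten or laid out at the
  // wrong stack address (where the cookie gets written is assumed, not shown here).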
  static const int COOKIE_VALUE = 0x1234;
  int cookie;

public:
  static int cookie_value() { return COOKIE_VALUE; }
  static ByteSize cookie_offset() { return byte_offset_of(ContinuationEntry, cookie); }

  void verify_cookie() {
    assert(cookie == COOKIE_VALUE, "Bad cookie: %#x, expected: %#x", cookie, COOKIE_VALUE);
  }
#endif

public:
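  // Offsets into the generated continuation-enter code; presumably filled in while that
  // stub is generated (hence the "friend gen_continuation_enter" note below) and then
  // mirrored into the _return_pc/_thaw_call_pc/_cleanup_pc addresses kept further down.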
  static int _return_pc_offset; // friend gen_continuation_enter
  static int _thaw_call_pc_offset;
  static int _cleanup_offset;

  static void set_enter_code(nmethod* nm, int interpreted_entry_offset);
  static void set_yield_code(nmethod* nm);
  static bool is_interpreted_call(address call_address);

private:
  static address _return_pc;
  static address _thaw_call_pc;
  static address _cleanup_pc;
  static nmethod* _enter_special;
  static nmethod* _do_yield;
  static int _interpreted_entry_offset;

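  // Per-entry state. A ContinuationEntry is not heap-allocated: it sits directly on the
  // thread stack at the continuation's entry frame, which is why entry_sp() below can
  // simply return 'this' and why assert_entry_frame_laid_out() can check the layout.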
private:
  ContinuationEntry* _parent;
  oopDesc* _cont;
  oopDesc* _chunk;
  int _flags;
  // Size in words of the stack arguments of the bottom frame on the stack if it is compiled,
  // 0 otherwise. The caller (if there is one) is the still-frozen top frame in the StackChunk.
  int _argsize;
  intptr_t* _parent_cont_fastpath;
#ifdef _LP64
  int64_t _parent_held_monitor_count;
#else
  int32_t _parent_held_monitor_count;
#endif
  uint32_t _pin_count;

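  // Field offsets exposed as ByteSize so generated code and stubs can address these
  // fields relative to the entry; the exact assembly consumers are not shown here and
  // are inferred from the naming.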
public:
  static ByteSize parent_offset() { return byte_offset_of(ContinuationEntry, _parent); }
  static ByteSize pin_count_offset() { return byte_offset_of(ContinuationEntry, _pin_count); }
  static ByteSize parent_cont_fastpath_offset() { return byte_offset_of(ContinuationEntry, _parent_cont_fastpath); }
  static ByteSize parent_held_monitor_count_offset() { return byte_offset_of(ContinuationEntry, _parent_held_monitor_count); }

public:
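  // Stack footprint of the entry, rounded up to a 2-word boundary; keeping the reserved
  // area double-word aligned is assumed to match the platform's stack-alignment rules.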
  static size_t size() { return align_up((int)sizeof(ContinuationEntry), 2*wordSize); }

  ContinuationEntry* parent() const { return _parent; }
  int64_t parent_held_monitor_count() const { return (int64_t)_parent_held_monitor_count; }

  static address entry_pc() { return _return_pc; }
  intptr_t* entry_sp() const { return (intptr_t*)this; }
  intptr_t* entry_fp() const;

  static address thaw_call_pc_address() { return (address)&_thaw_call_pc; }
  static address cleanup_pc() { return _cleanup_pc; }

  static address compiled_entry();
  static address interpreted_entry();

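  // The nmethod registered through set_yield_code(); presumably the compiled code for
  // Continuation.doYield. The bare assert reflects an assumption that it is installed
  // once at startup and never unloaded.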
  static nmethod* do_yield_nmethod() {
    assert(_do_yield != nullptr, "oops");
    return _do_yield;
  }

  int argsize() const { return _argsize; }
  void set_argsize(int value) { _argsize = value; }

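  // Pin bookkeeping: pin() fails (returns false) once the count would overflow UINT32_MAX
  // and unpin() fails on an already-zero count, so callers must check the result.
  // Illustrative use only (the accessor below is an assumption, not declared here):
  //
  //   ContinuationEntry* ce = thread->last_continuation();  // assumed accessor
  //   if (ce != nullptr && ce->pin()) {
  //     // ... region during which the continuation must not be frozen ...
  //     ce->unpin();
  //   }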
  bool is_pinned() { return _pin_count > 0; }
  bool pin() {
    if (_pin_count == UINT32_MAX) return false;
    _pin_count++;
    return true;
  }
  bool unpin() {
    if (_pin_count == 0) return false;
    _pin_count--;
    return true;
  }

  intptr_t* parent_cont_fastpath() const { return _parent_cont_fastpath; }
  void set_parent_cont_fastpath(intptr_t* x) { _parent_cont_fastpath = x; }

  static ContinuationEntry* from_frame(const frame& f);
  frame to_frame() const;
  void update_register_map(RegisterMap* map) const;
  void flush_stack_processing(JavaThread* thread) const;

  inline intptr_t* bottom_sender_sp() const;
  inline oop cont_oop(const JavaThread* thread) const;
  inline oop scope(const JavaThread* thread) const;
  inline static oop cont_oop_or_null(const ContinuationEntry* ce, const JavaThread* thread);

  oop* cont_addr() { return (oop*)&_cont; }
  oop* chunk_addr() { return (oop*)&_chunk; }

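  // _flags currently only answers the virtual-thread question, so any non-zero value is
  // treated as "virtual thread"; reading it as a more general flag word is an assumption
  // based on the field name.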
  bool is_virtual_thread() const { return _flags != 0; }

#ifndef PRODUCT
  void describe(FrameValues& values, int frame_no) const;
#endif

#ifdef ASSERT
  static bool assert_entry_frame_laid_out(JavaThread* thread, bool preempted = false);
#endif
};

#endif // SHARE_VM_RUNTIME_CONTINUATIONENTRY_HPP