inline uint8_t stackChunkOopDesc::flags() const { return jdk_internal_vm_StackChunk::flags(as_oop()); }
inline void stackChunkOopDesc::set_flags(uint8_t value) { jdk_internal_vm_StackChunk::set_flags(this, value); }

inline uint8_t stackChunkOopDesc::flags_acquire() const { return jdk_internal_vm_StackChunk::flags_acquire(as_oop()); }

inline void stackChunkOopDesc::release_set_flags(uint8_t value) {
  jdk_internal_vm_StackChunk::release_set_flags(this, value);
}

inline bool stackChunkOopDesc::try_set_flags(uint8_t prev_flags, uint8_t new_flags) {
  return jdk_internal_vm_StackChunk::try_set_flags(this, prev_flags, new_flags);
}
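
// Illustrative sketch (not part of the original file): try_set_flags is a
// compare-exchange on the whole flags byte, so a caller racing with other
// flag updates can retry until the CAS succeeds. "atomically_set_flag" is a
// hypothetical helper shown only to demonstrate the usage pattern.
//
//   static void atomically_set_flag(stackChunkOop chunk, uint8_t flag) {
//     for (;;) {
//       uint8_t prev = chunk->flags();
//       if (chunk->try_set_flags(prev, (uint8_t)(prev | flag))) {
//         return; // CAS succeeded; the flag is now set
//       }
//       // another thread changed the flags byte concurrently; retry
//     }
//   }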

inline int stackChunkOopDesc::max_thawing_size() const { return jdk_internal_vm_StackChunk::maxThawingSize(as_oop()); }
inline void stackChunkOopDesc::set_max_thawing_size(int value) {
  assert(value >= 0, "size must be >= 0");
  jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
}

inline uint8_t stackChunkOopDesc::lockStackSize() const { return jdk_internal_vm_StackChunk::lockStackSize(as_oop()); }
inline void stackChunkOopDesc::set_lockStackSize(uint8_t value) { jdk_internal_vm_StackChunk::set_lockStackSize(this, value); }

inline ObjectMonitor* stackChunkOopDesc::objectMonitor() const { return (ObjectMonitor*)jdk_internal_vm_StackChunk::objectMonitor(as_oop()); }
inline void stackChunkOopDesc::set_objectMonitor(ObjectMonitor* mon) { jdk_internal_vm_StackChunk::set_objectMonitor(this, (address)mon); }

inline oop stackChunkOopDesc::cont() const {
  if (UseZGC && !ZGenerational) {
    assert(!UseCompressedOops, "Non-generational ZGC does not support compressed oops");
    // The state of the cont oop is used by XCollectedHeap::requires_barriers
    // to determine the age of the stackChunkOopDesc. For that to work, only
    // the GC is allowed to perform a load barrier on the oop itself.
    // This function is used by non-GC code and therefore creates a stack-local
    // copy of the oop and performs the load barrier on that copy instead.
    oop obj = jdk_internal_vm_StackChunk::cont_raw<oop>(as_oop());
    obj = (oop)NativeAccess<>::oop_load(&obj);
    return obj;
  }
  return jdk_internal_vm_StackChunk::cont(as_oop());
}
inline void stackChunkOopDesc::set_cont(oop value) { jdk_internal_vm_StackChunk::set_cont(this, value); }
template<typename P>
inline void stackChunkOopDesc::set_cont_raw(oop value) { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }

inline bool stackChunkOopDesc::is_in_chunk(void* p) const {
  HeapWord* start = (HeapWord*)start_address();
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
  return (flags() & flag) != 0;
}
inline bool stackChunkOopDesc::is_flag_acquire(uint8_t flag) const {
  return (flags_acquire() & flag) != 0;
}
inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
  uint32_t flags = this->flags();
  set_flags((uint8_t)(value ? (flags | flag) : (flags & ~flag)));
}
inline void stackChunkOopDesc::clear_flags() {
  set_flags(0);
}

inline bool stackChunkOopDesc::has_mixed_frames() const { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
inline void stackChunkOopDesc::set_has_mixed_frames(bool value) {
  assert((flags() & ~(FLAG_HAS_INTERPRETED_FRAMES | FLAG_PREEMPTED)) == 0, "other flags should not be set");
  set_flag(FLAG_HAS_INTERPRETED_FRAMES, value);
}

inline bool stackChunkOopDesc::is_preempted() const { return is_flag(FLAG_PREEMPTED); }
inline void stackChunkOopDesc::set_is_preempted(bool value) { set_flag(FLAG_PREEMPTED, value); }
inline bool stackChunkOopDesc::preempted_on_monitorenter() const { return objectMonitor() != nullptr; }

inline bool stackChunkOopDesc::has_lockStack() const { return is_flag(FLAG_HAS_LOCKSTACK); }
inline void stackChunkOopDesc::set_has_lockStack(bool value) { set_flag(FLAG_HAS_LOCKSTACK, value); }

inline bool stackChunkOopDesc::is_gc_mode() const { return is_flag(FLAG_GC_MODE); }
inline bool stackChunkOopDesc::is_gc_mode_acquire() const { return is_flag_acquire(FLAG_GC_MODE); }
inline void stackChunkOopDesc::set_gc_mode(bool value) { set_flag(FLAG_GC_MODE, value); }

inline bool stackChunkOopDesc::has_bitmap() const { return is_flag(FLAG_HAS_BITMAP); }
inline void stackChunkOopDesc::set_has_bitmap(bool value) { set_flag(FLAG_HAS_BITMAP, value); }

inline bool stackChunkOopDesc::has_thaw_slowpath_condition() const { return flags() != 0; }

inline bool stackChunkOopDesc::requires_barriers() {
  return Universe::heap()->requires_barriers(this);
}

template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  if (frame_kind == ChunkFrames::Mixed) {
    // we could freeze deopted frames in slow mode.
    f.handle_deopted();
  }
  do_barriers0<barrier>(f, map);
}
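
// Illustrative note (not part of the original file): the barrier kind is
// chosen at compile time by the caller walking the chunk; frame_kind and
// RegisterMapT are deduced from the arguments, e.g.:
//
//   chunk->do_barriers<stackChunkOopDesc::BarrierType::Store>(f, map);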

template <typename OopT, class StackChunkLockStackClosureType>
inline void stackChunkOopDesc::iterate_lockstack(StackChunkLockStackClosureType* closure) {
  if (LockingMode != LM_LIGHTWEIGHT) {
    return;
  }
  int cnt = lockStackSize();
  intptr_t* lockstart_addr = start_address();
  for (int i = 0; i < cnt; i++) {
    closure->do_oop((OopT*)&lockstart_addr[i]);
  }
}
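
// Illustrative sketch (not part of the original file): iterate_lockstack
// visits the lock-stack oops stored at the start of the chunk's stack area
// (see start_address() above). A closure only needs a do_oop(OopT*) member,
// and the OopT template argument must match the heap's oop encoding.
// "CountingOopClosure" is a hypothetical name used for illustration.
//
//   struct CountingOopClosure {
//     int _count = 0;
//     template <typename OopT>
//     void do_oop(OopT* p) { _count++; }
//   };
//
//   CountingOopClosure cl;
//   if (UseCompressedOops) {
//     chunk->iterate_lockstack<narrowOop>(&cl);
//   } else {
//     chunk->iterate_lockstack<oop>(&cl);
//   }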

template <class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  has_mixed_frames() ? iterate_stack<ChunkFrames::Mixed>(closure)
                     : iterate_stack<ChunkFrames::CompiledOnly>(closure);
}

template <ChunkFrames frame_kind, class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  const SmallRegisterMap* map = SmallRegisterMap::instance;
  assert(!map->in_cont(), "");

  StackChunkFrameStream<frame_kind> f(this);
  bool should_continue = true;

  if (f.is_stub()) {
    RegisterMap full_map(nullptr,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::skip,
                         RegisterMap::WalkContinuation::include);
    full_map.set_include_argument_oops(false);
    closure->do_frame(f, map);

    f.next(&full_map);
    if (f.is_done()) return;

    should_continue = closure->do_frame(f, &full_map);
    f.next(map);
  }
  assert(!f.is_stub(), "");

  for (; should_continue && !f.is_done(); f.next(map)) {
    if (frame_kind == ChunkFrames::Mixed) {
      // in slow mode we might freeze deoptimized frames
      f.handle_deopted();
    }
    should_continue = closure->do_frame(f, map);
  }
}
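
// Illustrative sketch (not part of the original file): a frame closure needs
// a do_frame member returning whether the walk should continue. It must
// accept both map types, since the caller of a stub frame is visited with
// the full RegisterMap built above while all other frames get the
// SmallRegisterMap. "TraceClosure" and its body are hypothetical.
//
//   struct TraceClosure {
//     template <ChunkFrames frame_kind, typename RegisterMapT>
//     bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
//       // inspect the frame here, e.g. via f.to_frame()
//       return true; // keep walking toward the bottom of the chunk
//     }
//   };
//
//   TraceClosure cl;
//   chunk->iterate_stack(&cl); // dispatches on has_mixed_frames()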

inline frame stackChunkOopDesc::relativize(frame fr) const { relativize_frame(fr); return fr; }
inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }

inline void* stackChunkOopDesc::gc_data() const {
  int stack_sz = stack_size();
  assert(stack_sz != 0, "stack should not be empty");

  // The gc data is located after the stack.
  return start_of_stack() + stack_sz;
}

template <typename OopT>
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
  assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
  assert(p >= (OopT*)start_address(), "Address not in chunk");
  return p - (OopT*)start_address();
}

inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
}

template <typename OopT>
inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return (OopT*)start_address() + index;
}
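
// Illustrative note (not part of the original file): the bitmap has one bit
// per oop-sized stack slot, so for a correctly typed slot pointer the two
// conversions above are inverses:
//
//   OopT* p = ...;                                // some oop slot in the chunk
//   BitMap::idx_t idx = chunk->bit_index_for(p);  // slot index from start_address()
//   assert(chunk->address_for_bit<OopT>(idx) == p, "round trip");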

inline MemRegion stackChunkOopDesc::range() {
  return MemRegion((HeapWord*)this, size());
}

inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame() || fr.cb()->is_runtime_stub(), "");
  assert(is_in_chunk(fr.unextended_sp()), "");

  intptr_t* base = fr.real_fp(); // equal to the caller's sp
  intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
  assert(base > loc, "");
  return (int)(base - loc);
}
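
// Illustrative note (not part of the original file): the result is the
// distance in words from the frame's real_fp (the caller's sp) down to the
// location, which stays valid if the GC moves the chunk. With hypothetical
// numbers on a 64-bit VM (8-byte words): if real_fp == unextended_sp + 10
// and usp_offset_in_bytes == 16, then
//
//   loc    = unextended_sp + 2   // 16 bytes = 2 words above unextended_sp
//   base   = unextended_sp + 10
//   result = base - loc = 8      // words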

inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame(), "");
  return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
}

inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
  assert(fr.is_compiled_frame(), "");
  assert(map != nullptr, "");
  assert(map->stack_chunk() == as_oop(), "");

  // the offsets are saved in the map after going through relativize_usp_offset, so they are sp - loc, in words
  intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case