/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
#define SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP

#include "oops/stackChunkOop.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/registerMap.hpp"
#include "utilities/macros.hpp"

DEF_HANDLE_CONSTR(stackChunk, is_stackChunk_noinline)

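// Accessors for the fields of the Java-level jdk.internal.vm.StackChunk object.
// sp(), argsize() and stack_size() are word offsets/sizes into the chunk's stack
// area (see the *_address() helpers below), and flags() packs the FLAG_* bits.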
inline stackChunkOopDesc* stackChunkOopDesc::parent() const         { return (stackChunkOopDesc*)(oopDesc*)jdk_internal_vm_StackChunk::parent(as_oop()); }
inline void stackChunkOopDesc::set_parent(stackChunkOopDesc* value) { jdk_internal_vm_StackChunk::set_parent(this, (oop)value); }
template<typename P> inline void stackChunkOopDesc::set_parent_raw(oop value) { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
template<typename P> inline bool stackChunkOopDesc::is_parent_null() const    { return jdk_internal_vm_StackChunk::is_parent_null<P>(as_oop()); }
inline int stackChunkOopDesc::stack_size() const         { return jdk_internal_vm_StackChunk::size(as_oop()); }
inline int stackChunkOopDesc::sp() const                 { return jdk_internal_vm_StackChunk::sp(as_oop()); }
inline void stackChunkOopDesc::set_sp(int value)         { jdk_internal_vm_StackChunk::set_sp(this, value); }
inline address stackChunkOopDesc::pc() const             { return (address)jdk_internal_vm_StackChunk::pc(as_oop()); }
inline void stackChunkOopDesc::set_pc(address value)     { jdk_internal_vm_StackChunk::set_pc(this, (jlong)value); }
inline int stackChunkOopDesc::argsize() const            { return jdk_internal_vm_StackChunk::argsize(as_oop()); }
inline void stackChunkOopDesc::set_argsize(int value)    { jdk_internal_vm_StackChunk::set_argsize(as_oop(), value); }
inline uint8_t stackChunkOopDesc::flags() const          { return (uint8_t)jdk_internal_vm_StackChunk::flags(as_oop()); }
inline void stackChunkOopDesc::set_flags(uint8_t value)  { jdk_internal_vm_StackChunk::set_flags(this, (jbyte)value); }
inline int stackChunkOopDesc::max_size() const           { return (int)jdk_internal_vm_StackChunk::maxSize(as_oop()); }
inline void stackChunkOopDesc::set_max_size(int value)   { jdk_internal_vm_StackChunk::set_maxSize(this, (jint)value); }
inline int stackChunkOopDesc::numFrames() const          { return jdk_internal_vm_StackChunk::numFrames(as_oop()); }
inline void stackChunkOopDesc::set_numFrames(int value)  { jdk_internal_vm_StackChunk::set_numFrames(this, value); }
inline int stackChunkOopDesc::numOops() const            { return jdk_internal_vm_StackChunk::numOops(as_oop()); }
inline void stackChunkOopDesc::set_numOops(int value)    { jdk_internal_vm_StackChunk::set_numOops(this, value); }
inline int stackChunkOopDesc::gc_sp() const              { return jdk_internal_vm_StackChunk::gc_sp(as_oop()); }
inline void stackChunkOopDesc::set_gc_sp(int value)      { jdk_internal_vm_StackChunk::set_gc_sp(this, value); }
inline uint64_t stackChunkOopDesc::mark_cycle() const         { return (uint64_t)jdk_internal_vm_StackChunk::mark_cycle(as_oop()); }
inline void stackChunkOopDesc::set_mark_cycle(uint64_t value) { jdk_internal_vm_StackChunk::set_mark_cycle(this, (jlong)value); }

inline void stackChunkOopDesc::set_cont(oop value) { jdk_internal_vm_StackChunk::set_cont(this, value); }
template<typename P> inline void stackChunkOopDesc::set_cont_raw(oop value)   { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
inline oop stackChunkOopDesc::cont() const  { return UseCompressedOops ? cont<narrowOop>() : cont<oop>(); /* jdk_internal_vm_StackChunk::cont(as_oop()); */ }
template<typename P> inline oop stackChunkOopDesc::cont() const {
  // this is a special field used to detect GC processing status (see should_fix) and so we don't want to invoke a barrier directly on it
  oop obj = jdk_internal_vm_StackChunk::cont_raw<P>(as_oop());
  obj = (oop)NativeAccess<>::oop_load(&obj);
  return obj;
}

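// Offsets into the chunk's stack area are in words: sp() marks the top of the frames
// currently stored in the chunk and bottom() (== stack_size() - argsize()) marks their
// end, so the chunk is empty once sp() reaches bottom() (see is_empty()).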
inline int stackChunkOopDesc::bottom() const { return stack_size() - argsize(); }

inline intptr_t* stackChunkOopDesc::start_address() const { return (intptr_t*)InstanceStackChunkKlass::start_of_stack(as_oop()); }
inline intptr_t* stackChunkOopDesc::end_address() const { return start_address() + stack_size(); }
inline intptr_t* stackChunkOopDesc::bottom_address() const { return start_address() + bottom(); }
inline intptr_t* stackChunkOopDesc::sp_address()  const { return start_address() + sp(); }

inline int stackChunkOopDesc::to_offset(intptr_t* p) const {
  assert(is_in_chunk(p) || (p >= start_address() && (p - start_address()) <= stack_size() + InstanceStackChunkKlass::metadata_words()),
    "p: " INTPTR_FORMAT " start: " INTPTR_FORMAT " end: " INTPTR_FORMAT, p2i(p), p2i(start_address()), p2i(bottom_address()));
  return (int)(p - start_address());
}

inline intptr_t* stackChunkOopDesc::from_offset(int offset) const {
  assert(offset <= stack_size(), "");
  return start_address() + offset;
}

inline bool stackChunkOopDesc::is_empty() const {
  assert (is_stackChunk(), "");
  // assert ((sp() < end()) || (sp() >= stack_size()), "");
  return sp() >= stack_size() - argsize();
}

inline bool stackChunkOopDesc::is_in_chunk(void* p) const {
  assert (is_stackChunk(), "");
  HeapWord* start = (HeapWord*)start_address();
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

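// Like is_in_chunk(), but only accepts addresses at or above the current sp()
// (less the frame metadata words immediately below it).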
inline bool stackChunkOopDesc::is_usable_in_chunk(void* p) const {
  assert (is_stackChunk(), "");
#if (defined(X86) || defined(AARCH64)) && !defined(ZERO)
  HeapWord* start = (HeapWord*)start_address() + sp() - frame::sender_sp_offset;
#else
  Unimplemented();
  HeapWord* start = NULL;
#endif
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
  return (flags() & flag) != 0;
}
inline bool stackChunkOopDesc::is_non_null_and_flag(uint8_t flag) const {
  return this != nullptr && is_flag(flag);
}
inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
  uint32_t flags = this->flags();
  set_flags((uint8_t)(value ? (flags | flag) : (flags & ~flag)));
}
inline void stackChunkOopDesc::clear_flags() {
  set_flags(0);
}
inline bool stackChunkOopDesc::requires_barriers() const {
  return Universe::heap()->requires_barriers(as_oop());
}

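// Returns true if the oop field at the given offset has already been processed by the GC,
// i.e. its raw value is unchanged by a barrier-mediated load.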
template <typename OopT>
inline static bool is_oop_fixed(oop obj, int offset) {
  OopT value = *obj->field_addr<OopT>(offset);
  intptr_t before = *(intptr_t*)&value;
  intptr_t after  = cast_from_oop<intptr_t>(NativeAccess<>::oop_load(&value));
  // tty->print_cr(">>> fixed %d: " INTPTR_FORMAT " -> " INTPTR_FORMAT, before == after, before, after);
  return before == after;
}

template <typename OopT, bool concurrent_gc>
inline bool stackChunkOopDesc::should_fix() const {
  if (UNLIKELY(is_gc_mode())) return true;
  // the last oop traversed in this object -- see InstanceStackChunkKlass::oop_oop_iterate
  if (concurrent_gc) return !is_oop_fixed<OopT>(as_oop(), jdk_internal_vm_StackChunk::cont_offset());
  return false;
}

inline bool stackChunkOopDesc::has_mixed_frames() const         { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
inline void stackChunkOopDesc::set_has_mixed_frames(bool value) { set_flag(FLAG_HAS_INTERPRETED_FRAMES, value); }
inline bool stackChunkOopDesc::is_gc_mode() const               { return is_flag(FLAG_GC_MODE); }
inline void stackChunkOopDesc::set_gc_mode(bool value)          { set_flag(FLAG_GC_MODE, value); }
inline bool stackChunkOopDesc::has_bitmap() const               { return is_flag(FLAG_HAS_BITMAP); }
inline void stackChunkOopDesc::set_has_bitmap(bool value)       { set_flag(FLAG_HAS_BITMAP, value); assert (!value || UseChunkBitmaps, ""); }

inline void stackChunkOopDesc::reset_counters() {
  set_numFrames(-1);
  set_numOops(-1);
}

inline intptr_t* stackChunkOopDesc::relative_base() const {
  // we relativize with respect to end rather than start because GC might compact the chunk
  return end_address() + InstanceStackChunkKlass::metadata_words();
}
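// A sketch of the arithmetic, assuming stack_size() == 100 and metadata_words() == 2:
//   relative_base()                           == start_address() + 102
//   relativize_address(start_address() + 40)  == 102 - 40 == 62
//   derelativize_address(62)                  == start_address() + 40
// The stored offset stays meaningful even if the chunk is moved or compacted.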

inline intptr_t* stackChunkOopDesc::derelativize_address(int offset) const {
  intptr_t* base = relative_base();
  intptr_t* p = base - offset;
  // tty->print_cr(">>> derelativize_address: %d -> %p (base: %p)", offset, p, base);
  assert (start_address() <= p && p <= base, "");
  return p;
}

inline int stackChunkOopDesc::relativize_address(intptr_t* p) const {
  intptr_t* base = relative_base();
  intptr_t offset = base - p;
  // tty->print_cr(">>> relativize_address: %p -> %ld (base: %p)", p, offset, base);
  assert (start_address() <= p && p <= base, "");
  assert (0 <= offset && offset <= std::numeric_limits<int>::max(), "");
  return (int)offset;
}

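// A frame stored in a chunk keeps its sp and unextended_sp as offsets rather than
// absolute addresses; these helpers convert a frame between the two representations.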
inline void stackChunkOopDesc::relativize_frame(frame& fr) const {
  fr.set_offset_sp(relativize_address(fr.sp()));
  fr.set_offset_unextended_sp(relativize_address(fr.unextended_sp()));
  relativize_frame_pd(fr);
}

inline void stackChunkOopDesc::derelativize_frame(frame& fr) const {
  fr.set_sp(derelativize_address(fr.offset_sp()));
  fr.set_unextended_sp(derelativize_address(fr.offset_unextended_sp()));
  derelativize_frame_pd(fr);
}

inline frame stackChunkOopDesc::relativize(frame fr)   const { relativize_frame(fr);   return fr; }
inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }

inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
  assert (fr.is_compiled_frame() || fr.cb()->is_safepoint_stub(), "");
  assert (is_in_chunk(fr.unextended_sp()), "");

  intptr_t* base = fr.real_fp(); // equal to the caller's sp
  intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
  assert (base > loc, "");
  int res = (int)(base - loc);
  // tty->print_cr(">>> relativize_usp_offset: %d -> %d -- address %p", usp_offset_in_bytes, res, loc); fr.print_on<true>(tty);
  return res;
}

inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
  assert (fr.is_compiled_frame(), "");
  assert (map != nullptr && map->stack_chunk() == as_oop(), "");

  // the offsets are saved in the map after going through relativize_usp_offset, so they are sp - loc, in words
  intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case
  intptr_t* base = derelativize_address(fr.offset_sp());
  // tty->print_cr(">>> reg_to_location: %s -> %ld -- address: %p", reg->name(), offset, base - offset); fr.print_on(tty);
  return (address)(base - offset);
}

inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
  assert (fr.is_compiled_frame(), "");
  // tty->print_cr(">>> usp_offset_to_location"); fr.print_on<true>(tty);
  return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
}

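// Accessors for interpreter frame state stored in the chunk; the frame is derelativized
// first so that the regular frame accessors can be applied to absolute addresses.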
inline Method* stackChunkOopDesc::interpreter_frame_method(const frame& fr) {
  // tty->print_cr(">>> interpreter_frame_method"); fr.print_on<true>(tty);
  return derelativize(fr).interpreter_frame_method();
}

inline address stackChunkOopDesc::interpreter_frame_bcp(const frame& fr) {
  // tty->print_cr(">>> interpreter_frame_bcp"); derelativize(fr).print_on<true>(tty);
  return derelativize(fr).interpreter_frame_bcp();
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_expression_stack_at(const frame& fr, int index) const {
  // tty->print_cr(">>> interpreter_frame_expression_stack_at"); fr.print_on<true>(tty);
  return derelativize(fr).interpreter_frame_expression_stack_at<true>(index);
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_local_at(const frame& fr, int index) const {
  // tty->print_cr(">>> interpreter_frame_local_at"); fr.print_on<true>(tty);
  return derelativize(fr).interpreter_frame_local_at<true>(index);
}

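// Bulk copies between a thread stack and the chunk's stack area. 'size' is in words;
// the dword_aligned template parameter is forwarded to InstanceStackChunkKlass, which
// performs the actual copy.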
template <bool dword_aligned>
inline void stackChunkOopDesc::copy_from_stack_to_chunk(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(jvmcont)("Chunk bounds: " INTPTR_FORMAT "(%d) - " INTPTR_FORMAT "(%d) (%d words, %d bytes)",
    p2i(start_address()), to_offset(start_address()), p2i(end_address()), to_offset(end_address() - 1) + 1, stack_size(), stack_size() << LogBytesPerWord);
  log_develop_trace(jvmcont)("Copying from v: " INTPTR_FORMAT " - " INTPTR_FORMAT " (%d words, %d bytes)", p2i(from), p2i(from + size), size, size << LogBytesPerWord);
  log_develop_trace(jvmcont)("Copying to h: " INTPTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " INTPTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
    p2i(to), to - start_address(), relative_base() - to, p2i(to + size), to + size - start_address(), relative_base() - (to + size), size, size << LogBytesPerWord);

  assert (to >= start_address(), "to: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(to), p2i(start_address()));
  assert (to + size <= end_address(), "to + size: " INTPTR_FORMAT " end: " INTPTR_FORMAT, p2i(to + size), p2i(end_address()));

  InstanceStackChunkKlass::copy_from_stack_to_chunk<dword_aligned>(from, to, size);
}

template <bool dword_aligned>
inline void stackChunkOopDesc::copy_from_chunk_to_stack(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(jvmcont)("Copying from h: " INTPTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " INTPTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
    p2i(from), from - start_address(), relative_base() - from, p2i(from + size), from + size - start_address(), relative_base() - (from + size), size, size << LogBytesPerWord);
  log_develop_trace(jvmcont)("Copying to v: " INTPTR_FORMAT " - " INTPTR_FORMAT " (%d words, %d bytes)", p2i(to), p2i(to + size), size, size << LogBytesPerWord);

  assert (from >= start_address(), "from: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(from), p2i(start_address()));
  assert (from + size <= end_address(), "from + size: " INTPTR_FORMAT " end: " INTPTR_FORMAT, p2i(from + size), p2i(end_address()));

  InstanceStackChunkKlass::copy_from_chunk_to_stack<dword_aligned>(from, to, size);
}

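// The chunk's oop bitmap has one bit per potential (narrow)oop slot of the stack area;
// bit_index_for() and address_for_bit() translate between slot addresses and bit indices.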
inline BitMapView stackChunkOopDesc::bitmap() const {
  assert (has_bitmap(), "");
  size_t size_in_bits = InstanceStackChunkKlass::bitmap_size(stack_size()) << LogBitsPerWord;
#ifdef ASSERT
  BitMapView bm((BitMap::bm_word_t*)InstanceStackChunkKlass::start_of_bitmap(as_oop()), size_in_bits);
  assert (bm.size() == size_in_bits, "bm.size(): %zu size_in_bits: %zu", bm.size(), size_in_bits);
  assert (bm.size_in_words() == (size_t)InstanceStackChunkKlass::bitmap_size(stack_size()), "bm.size_in_words(): %zu InstanceStackChunkKlass::bitmap_size(stack_size()): %d", bm.size_in_words(), InstanceStackChunkKlass::bitmap_size(stack_size()));
  bm.verify_range(bit_index_for(start_address()), bit_index_for(end_address()));
#endif
  return BitMapView((BitMap::bm_word_t*)InstanceStackChunkKlass::start_of_bitmap(as_oop()), size_in_bits);
}

inline BitMap::idx_t stackChunkOopDesc::bit_offset() const {
  return InstanceStackChunkKlass::bit_offset(stack_size());
}

inline BitMap::idx_t stackChunkOopDesc::bit_index_for(intptr_t* p) const {
  return UseCompressedOops ? bit_index_for((narrowOop*)p) : bit_index_for((oop*)p);
}

template <typename OopT>
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
  return bit_offset() + (p - (OopT*)start_address());
}

inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
}

template <typename OopT>
inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return (OopT*)start_address() + (index - bit_offset());
}

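// Visits every frame stored in the chunk with the given closure, selecting the
// specialization that handles interpreted frames only when the chunk may contain them.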
template <class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  has_mixed_frames() ? InstanceStackChunkKlass::iterate_stack<true >(this, closure)
                     : InstanceStackChunkKlass::iterate_stack<false>(this, closure);
}

inline MemRegion stackChunkOopDesc::range() {
  return MemRegion((HeapWord*)this, size());
}

#endif // SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP