/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
#define SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP

#include "oops/stackChunkOop.hpp"

#include "memory/memRegion.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/registerMap.hpp"
#include "utilities/macros.hpp"

#include <limits> // for std::numeric_limits, used in relativize_address()

DEF_HANDLE_CONSTR(stackChunk, is_stackChunk_noinline)

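// Accessors for the Java-mirrored fields of jdk.internal.vm.StackChunk. Note that sp(), argsize()
// and stack_size() are word offsets/counts within the chunk's stack area rather than native
// addresses; see start_address() and friends below for the address conversions.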
inline stackChunkOopDesc* stackChunkOopDesc::parent() const         { return (stackChunkOopDesc*)(oopDesc*)jdk_internal_vm_StackChunk::parent(as_oop()); }
inline void stackChunkOopDesc::set_parent(stackChunkOopDesc* value) { jdk_internal_vm_StackChunk::set_parent(this, (oop)value); }
template<typename P> inline void stackChunkOopDesc::set_parent_raw(oop value) { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
template<typename P> inline bool stackChunkOopDesc::is_parent_null() const    { return jdk_internal_vm_StackChunk::is_parent_null<P>(as_oop()); }
inline int stackChunkOopDesc::stack_size() const         { return jdk_internal_vm_StackChunk::size(as_oop()); }
inline int stackChunkOopDesc::sp() const                 { return jdk_internal_vm_StackChunk::sp(as_oop()); }
inline void stackChunkOopDesc::set_sp(int value)         { jdk_internal_vm_StackChunk::set_sp(this, value); }
inline address stackChunkOopDesc::pc() const             { return (address)jdk_internal_vm_StackChunk::pc(as_oop()); }
inline void stackChunkOopDesc::set_pc(address value)     { jdk_internal_vm_StackChunk::set_pc(this, (jlong)value); }
inline int stackChunkOopDesc::argsize() const            { return jdk_internal_vm_StackChunk::argsize(as_oop()); }
inline void stackChunkOopDesc::set_argsize(int value)    { jdk_internal_vm_StackChunk::set_argsize(as_oop(), value); }
inline uint8_t stackChunkOopDesc::flags() const          { return (uint8_t)jdk_internal_vm_StackChunk::flags(as_oop()); }
inline void stackChunkOopDesc::set_flags(uint8_t value)  { jdk_internal_vm_StackChunk::set_flags(this, (jbyte)value); }
inline int stackChunkOopDesc::max_size() const           { return (int)jdk_internal_vm_StackChunk::maxSize(as_oop()); }
inline void stackChunkOopDesc::set_max_size(int value)   { jdk_internal_vm_StackChunk::set_maxSize(this, (jint)value); }
inline int stackChunkOopDesc::gc_sp() const              { return jdk_internal_vm_StackChunk::gc_sp(as_oop()); }
inline void stackChunkOopDesc::set_gc_sp(int value)      { jdk_internal_vm_StackChunk::set_gc_sp(this, value); }
inline uint64_t stackChunkOopDesc::mark_cycle() const         { return (uint64_t)jdk_internal_vm_StackChunk::mark_cycle(as_oop()); }
inline void stackChunkOopDesc::set_mark_cycle(uint64_t value) { jdk_internal_vm_StackChunk::set_mark_cycle(this, (jlong)value); }

inline void stackChunkOopDesc::set_cont(oop value) { jdk_internal_vm_StackChunk::set_cont(this, value); }
template<typename P> inline void stackChunkOopDesc::set_cont_raw(oop value)   { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
inline oop stackChunkOopDesc::cont() const  { return UseCompressedOops ? cont<narrowOop>() : cont<oop>(); }
template<typename P> inline oop stackChunkOopDesc::cont() const {
  // cont is a special field used to detect the GC processing status of this chunk (see should_fix),
  // so we don't want to invoke a barrier directly on it; instead, load it raw and then apply the
  // load barrier to the loaded value.
  oop obj = jdk_internal_vm_StackChunk::cont_raw<P>(as_oop());
  obj = (oop)NativeAccess<>::oop_load(&obj);
  return obj;
}

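// The frame area of the chunk spans [sp(), bottom()); the trailing argsize() words hold the
// stack arguments of the bottommost frame, which overlap with its caller's frame.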
inline int stackChunkOopDesc::bottom() const { return stack_size() - argsize(); }

inline intptr_t* stackChunkOopDesc::start_address() const { return (intptr_t*)InstanceStackChunkKlass::start_of_stack(as_oop()); }
inline intptr_t* stackChunkOopDesc::end_address() const { return start_address() + stack_size(); }
inline intptr_t* stackChunkOopDesc::bottom_address() const { return start_address() + bottom(); }
inline intptr_t* stackChunkOopDesc::sp_address()  const { return start_address() + sp(); }

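// Conversions between native addresses into the stack area and word offsets from start_address().
// Offsets, unlike raw addresses, remain meaningful if the GC moves the chunk.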
inline int stackChunkOopDesc::to_offset(intptr_t* p) const {
  assert(is_in_chunk(p) || (p >= start_address() && (p - start_address()) <= stack_size() + InstanceStackChunkKlass::metadata_words()),
    "p: " INTPTR_FORMAT " start: " INTPTR_FORMAT " end: " INTPTR_FORMAT, p2i(p), p2i(start_address()), p2i(end_address()));
  return p - start_address();
}

inline intptr_t* stackChunkOopDesc::from_offset(int offset) const {
  assert(offset <= stack_size(), "");
  return start_address() + offset;
}

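// A chunk is empty when sp has reached bottom(), i.e. only the argument-overlap area (if any)
// remains below it.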
inline bool stackChunkOopDesc::is_empty() const {
  assert (is_stackChunk(), "");
  // assert ((sp() < end()) || (sp() >= stack_size()), "");
  return sp() >= stack_size() - argsize();
}

inline bool stackChunkOopDesc::is_in_chunk(void* p) const {
  assert (is_stackChunk(), "");
  HeapWord* start = (HeapWord*)start_address();
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

inline bool stackChunkOopDesc::is_usable_in_chunk(void* p) const {
  assert (is_stackChunk(), "");
#if (defined(X86) || defined(AARCH64)) && !defined(ZERO)
  HeapWord* start = (HeapWord*)start_address() + sp() - frame::sender_sp_offset;
#else
  Unimplemented();
  HeapWord* start = NULL;
#endif
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

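// Flag accessors: the flags byte is a bitset of the FLAG_* constants; see the named accessors
// such as has_mixed_frames() further below for the individual bits.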
inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
  return (flags() & flag) != 0;
}
inline bool stackChunkOopDesc::is_non_null_and_flag(uint8_t flag) const {
  // Note: deliberately callable on a null receiver; strictly speaking, comparing `this` against
  // nullptr is undefined behavior, so callers should prefer an explicit null check where possible.
  return this != nullptr && is_flag(flag);
}
inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
  uint8_t flags = this->flags();
  set_flags(value ? (uint8_t)(flags | flag) : (uint8_t)(flags & ~flag));
}
inline void stackChunkOopDesc::clear_flags() {
  set_flags(0);
}
inline bool stackChunkOopDesc::requires_barriers() const {
  return Universe::heap()->requires_barriers(as_oop());
}

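// Compares the raw contents of the oop field at `offset` with what a barrier-applied load
// returns; if a load barrier would leave the value unchanged, the field has already been
// processed ("fixed") by the GC.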
template <typename OopT>
inline static bool is_oop_fixed(oop obj, int offset) {
  OopT value = *obj->field_addr<OopT>(offset);
  intptr_t before = *(intptr_t*)&value;
  intptr_t after  = cast_from_oop<intptr_t>(NativeAccess<>::oop_load(&value));
  // tty->print_cr(">>> fixed %d: " INTPTR_FORMAT " -> " INTPTR_FORMAT, before == after, before, after);
  return before == after;
}

template <typename OopT, bool concurrent_gc>
inline bool stackChunkOopDesc::should_fix() const {
  if (UNLIKELY(is_gc_mode())) return true;
  // cont is the last oop traversed in this object -- see InstanceStackChunkKlass::oop_oop_iterate
  if (concurrent_gc) return !is_oop_fixed<OopT>(as_oop(), jdk_internal_vm_StackChunk::cont_offset());
  return false;
}

inline bool stackChunkOopDesc::has_mixed_frames() const         { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
inline void stackChunkOopDesc::set_has_mixed_frames(bool value) { set_flag(FLAG_HAS_INTERPRETED_FRAMES, value); }
inline bool stackChunkOopDesc::is_gc_mode() const               { return is_flag(FLAG_GC_MODE); }
inline void stackChunkOopDesc::set_gc_mode(bool value)          { set_flag(FLAG_GC_MODE, value); }
inline bool stackChunkOopDesc::has_bitmap() const               { return is_flag(FLAG_HAS_BITMAP); }
inline void stackChunkOopDesc::set_has_bitmap(bool value)       { set_flag(FLAG_HAS_BITMAP, value); assert (!value || UseChunkBitmaps, ""); }

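// Relativization: frame addresses within the chunk are stored as word offsets measured downward
// from relative_base(), so that they remain valid when the GC moves or compacts the chunk. A
// rough sketch of the intended use (the real callers are in the freeze/thaw code):
//
//   chunk->relativize_frame(f);          // store offsets instead of raw sp/unextended_sp
//   ...                                  // the GC may move the chunk here
//   frame f1 = chunk->derelativize(f);   // rebuild usable addresses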
inline intptr_t* stackChunkOopDesc::relative_base() const {
  // we relativize with respect to end rather than start because GC might compact the chunk
  return end_address() + InstanceStackChunkKlass::metadata_words();
}

inline intptr_t* stackChunkOopDesc::derelativize_address(int offset) const {
  intptr_t* base = relative_base();
  intptr_t* p = base - offset;
  // tty->print_cr(">>> derelativize_address: %d -> %p (base: %p)", offset, p, base);
  assert (start_address() <= p && p <= base, "");
  return p;
}

inline int stackChunkOopDesc::relativize_address(intptr_t* p) const {
  intptr_t* base = relative_base();
  intptr_t offset = base - p;
  // tty->print_cr(">>> relativize_address: %p -> %ld (base: %p)", p, offset, base);
  assert (start_address() <= p && p <= base, "");
  assert (0 <= offset && offset <= std::numeric_limits<int>::max(), "");
  return (int)offset;
}

inline void stackChunkOopDesc::relativize_frame(frame& fr) const {
  fr.set_offset_sp(relativize_address(fr.sp()));
  fr.set_offset_unextended_sp(relativize_address(fr.unextended_sp()));
  relativize_frame_pd(fr);
}

inline void stackChunkOopDesc::derelativize_frame(frame& fr) const {
  fr.set_sp(derelativize_address(fr.offset_sp()));
  fr.set_unextended_sp(derelativize_address(fr.offset_unextended_sp()));
  derelativize_frame_pd(fr);
}

inline frame stackChunkOopDesc::relativize(frame fr)   const { relativize_frame(fr);   return fr; }
inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }

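// Helpers for RegisterMap-based stack walking: they translate register values and
// unextended-sp-relative offsets of a compiled frame into locations inside the chunk.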
inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
  assert (fr.is_compiled_frame() || fr.cb()->is_safepoint_stub(), "");
  assert (is_in_chunk(fr.unextended_sp()), "");

  intptr_t* base = fr.real_fp(); // equal to the caller's sp
  intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
  assert (base > loc, "");
  return (int)(base - loc);
}

inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
  assert (fr.is_compiled_frame(), "");
  assert (map != nullptr && map->stack_chunk() == as_oop(), "");

  // the offsets are saved in the map after going through relativize_usp_offset, so they are sp - loc, in words
  intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case
  intptr_t* base = derelativize_address(fr.offset_sp());
  return (address)(base - offset);
}

inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
  assert (fr.is_compiled_frame(), "");
  return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
}

inline Method* stackChunkOopDesc::interpreter_frame_method(const frame& fr) {
  return derelativize(fr).interpreter_frame_method();
}

inline address stackChunkOopDesc::interpreter_frame_bcp(const frame& fr) {
  return derelativize(fr).interpreter_frame_bcp();
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_expression_stack_at(const frame& fr, int index) const {
  return derelativize(fr).interpreter_frame_expression_stack_at<true>(index);
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_local_at(const frame& fr, int index) const {
  return derelativize(fr).interpreter_frame_local_at<true>(index);
}

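// Bulk copies between a thread stack ("v", for vertical) and the chunk ("h", for heap), as used
// when freezing and thawing a continuation; dword_aligned selects the copy routine for
// double-word-aligned data.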
template <bool dword_aligned>
inline void stackChunkOopDesc::copy_from_stack_to_chunk(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(jvmcont)("Chunk bounds: " INTPTR_FORMAT "(%d) - " INTPTR_FORMAT "(%d) (%d words, %d bytes)",
    p2i(start_address()), to_offset(start_address()), p2i(end_address()), to_offset(end_address() - 1) + 1, stack_size(), stack_size() << LogBytesPerWord);
  log_develop_trace(jvmcont)("Copying from v: " INTPTR_FORMAT " - " INTPTR_FORMAT " (%d words, %d bytes)", p2i(from), p2i(from + size), size, size << LogBytesPerWord);
  log_develop_trace(jvmcont)("Copying to h: " INTPTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " INTPTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
    p2i(to), to - start_address(), relative_base() - to, p2i(to + size), to + size - start_address(), relative_base() - (to + size), size, size << LogBytesPerWord);

  assert (to >= start_address(), "to: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(to), p2i(start_address()));
  assert (to + size <= end_address(), "to + size: " INTPTR_FORMAT " end: " INTPTR_FORMAT, p2i(to + size), p2i(end_address()));

  InstanceStackChunkKlass::copy_from_stack_to_chunk<dword_aligned>(from, to, size);
}

template <bool dword_aligned>
inline void stackChunkOopDesc::copy_from_chunk_to_stack(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(jvmcont)("Copying from h: " INTPTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " INTPTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
    p2i(from), from - start_address(), relative_base() - from, p2i(from + size), from + size - start_address(), relative_base() - (from + size), size, size << LogBytesPerWord);
  log_develop_trace(jvmcont)("Copying to v: " INTPTR_FORMAT " - " INTPTR_FORMAT " (%d words, %d bytes)", p2i(to), p2i(to + size), size, size << LogBytesPerWord);

  assert (from >= start_address(), "from: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(from), p2i(start_address()));
  assert (from + size <= end_address(), "from + size: " INTPTR_FORMAT " end: " INTPTR_FORMAT, p2i(from + size), p2i(end_address()));

  InstanceStackChunkKlass::copy_from_chunk_to_stack<dword_aligned>(from, to, size);
}

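// The optional oop bitmap (see has_bitmap()) has one bit per potential (possibly narrow) oop slot
// in the stack area; bit_index_for() and address_for_bit() translate between slot addresses and
// bit indices.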
inline BitMapView stackChunkOopDesc::bitmap() const {
  assert (has_bitmap(), "");
  size_t size_in_bits = InstanceStackChunkKlass::bitmap_size(stack_size()) << LogBitsPerWord;
#ifdef ASSERT
  BitMapView bm((BitMap::bm_word_t*)InstanceStackChunkKlass::start_of_bitmap(as_oop()), size_in_bits);
  assert (bm.size() == size_in_bits, "bm.size(): %zu size_in_bits: %zu", bm.size(), size_in_bits);
  assert (bm.size_in_words() == (size_t)InstanceStackChunkKlass::bitmap_size(stack_size()), "bm.size_in_words(): %zu InstanceStackChunkKlass::bitmap_size(stack_size()): %zu", bm.size_in_words(), InstanceStackChunkKlass::bitmap_size(stack_size()));
  bm.verify_range(bit_index_for(start_address()), bit_index_for(end_address()));
#endif
  return BitMapView((BitMap::bm_word_t*)InstanceStackChunkKlass::start_of_bitmap(as_oop()), size_in_bits);
}

inline BitMap::idx_t stackChunkOopDesc::bit_offset() const {
  return InstanceStackChunkKlass::bit_offset(stack_size());
}

inline BitMap::idx_t stackChunkOopDesc::bit_index_for(intptr_t* p) const {
  return UseCompressedOops ? bit_index_for((narrowOop*)p) : bit_index_for((oop*)p);
}

template <typename OopT>
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
  return bit_offset() + (p - (OopT*)start_address());
}

inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
}

template <typename OopT>
inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return (OopT*)start_address() + (index - bit_offset());
}

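// Visits the frames in the chunk with the given closure, dispatching statically on whether
// interpreted frames may be present so the common compiled-frames-only case can use the
// specialized iteration.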
template <class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  has_mixed_frames() ? InstanceStackChunkKlass::iterate_stack<true >(this, closure)
                     : InstanceStackChunkKlass::iterate_stack<false>(this, closure);
}

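// The heap region covered by this chunk object as a whole: header, metadata and stack area.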
inline MemRegion stackChunkOopDesc::range() {
  return MemRegion((HeapWord*)this, size());
}

#endif // SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP