< prev index next >

src/hotspot/share/runtime/continuationEntry.hpp

Print this page

 44 #ifdef ASSERT
 45  private:
 46   static const int COOKIE_VALUE = 0x1234;
 47   int cookie;
 48 
 49  public:
 50   static int cookie_value() { return COOKIE_VALUE; }
 51   static ByteSize cookie_offset() { return byte_offset_of(ContinuationEntry, cookie); }
 52 
  // Debug-only: asserts that this entry's cookie field still holds
  // COOKIE_VALUE, catching overwrite/corruption of the ContinuationEntry
  // laid out on the thread stack.
 53   void verify_cookie() {
 54     assert(cookie == COOKIE_VALUE, "Bad cookie: %#x, expected: %#x", cookie, COOKIE_VALUE);
 55   }
 56 #endif
 57 
 58  public:
 59   static int _return_pc_offset; // friend gen_continuation_enter
 60   static int _thaw_call_pc_offset;
 61   static int _cleanup_offset;
 62 
 63   static void set_enter_code(nmethod* nm, int interpreted_entry_offset);

 64   static bool is_interpreted_call(address call_address);
 65 
 66  private:
 67   static address _return_pc;
 68   static address _thaw_call_pc;
 69   static address _cleanup_pc;
 70   static nmethod* _enter_special;

 71   static int _interpreted_entry_offset;
 72 
 73  private:
 74   ContinuationEntry* _parent;
 75   oopDesc* _cont;
 76   oopDesc* _chunk;
 77   int _flags;
 78   // Size in words of the stack arguments of the bottom frame on stack if compiled 0 otherwise.
 79   // The caller (if there is one) is the still frozen top frame in the StackChunk.
 80   int _argsize;
 81   intptr_t* _parent_cont_fastpath;
 82 #ifdef _LP64
 83   int64_t   _parent_held_monitor_count;
 84 #else
 85   int32_t   _parent_held_monitor_count;
 86 #endif
 87   uint32_t _pin_count;
 88 
 89  public:
 90   static ByteSize parent_offset()   { return byte_offset_of(ContinuationEntry, _parent); }

 98 
 99   static address return_pc() { return _return_pc; }
100   static address return_pc_address() { return (address)&_return_pc; }
101 
102  public:
103   static size_t size() { return align_up((int)sizeof(ContinuationEntry), 2*wordSize); }
104 
105   ContinuationEntry* parent() const { return _parent; }
106   int64_t parent_held_monitor_count() const { return (int64_t)_parent_held_monitor_count; }
107 
108   static address entry_pc() { return _return_pc; }
109   intptr_t* entry_sp() const { return (intptr_t*)this; }
110   intptr_t* entry_fp() const;
111 
112   static address thaw_call_pc_address() { return (address)&_thaw_call_pc; }
113   static address cleanup_pc() { return _cleanup_pc; }
114 
115   static address compiled_entry();
116   static address interpreted_entry();
117 





118   int argsize() const { return _argsize; }
119   void set_argsize(int value) { _argsize = value; }
120 
121   bool is_pinned() { return _pin_count > 0; }
  // Increments the pin count for this continuation entry.
  // Returns false (and leaves the count unchanged) if the count is already
  // saturated at UINT32_MAX; returns true on success.
 122   bool pin() {
 123     if (_pin_count == UINT32_MAX) return false;
 124     _pin_count++;
 125     return true;
 126   }
  // Decrements the pin count. Returns false on underflow, i.e. an unpin
  // without a matching successful pin(); returns true on success.
 127   bool unpin() {
 128     if (_pin_count == 0) return false;
 129     _pin_count--;
 130     return true;
 131   }
132 
133   intptr_t* parent_cont_fastpath() const { return _parent_cont_fastpath; }
134   void set_parent_cont_fastpath(intptr_t* x) { _parent_cont_fastpath = x; }
135 
136   static ContinuationEntry* from_frame(const frame& f);
137   frame to_frame() const;
138   void update_register_map(RegisterMap* map) const;
139   void flush_stack_processing(JavaThread* thread) const;
140 
141   inline intptr_t* bottom_sender_sp() const;
142   inline oop cont_oop(const JavaThread* thread) const;
143   inline oop scope(const JavaThread* thread) const;
144   inline static oop cont_oop_or_null(const ContinuationEntry* ce, const JavaThread* thread);
145 
146   oop* cont_addr() { return (oop*)&_cont; }
147   oop* chunk_addr() { return (oop*)&_chunk; }
148 
149   bool is_virtual_thread() const { return _flags != 0; }
150 
151 #ifndef PRODUCT
152   void describe(FrameValues& values, int frame_no) const;
153 #endif
154 
155 #ifdef ASSERT
156   static bool assert_entry_frame_laid_out(JavaThread* thread);
157 #endif
158 };
159 
160 #endif // SHARE_VM_RUNTIME_CONTINUATIONENTRY_HPP

 44 #ifdef ASSERT
 45  private:
 46   static const int COOKIE_VALUE = 0x1234;
 47   int cookie;
 48 
 49  public:
 50   static int cookie_value() { return COOKIE_VALUE; }
 51   static ByteSize cookie_offset() { return byte_offset_of(ContinuationEntry, cookie); }
 52 
  // Debug-only: asserts that this entry's cookie field still holds
  // COOKIE_VALUE, catching overwrite/corruption of the ContinuationEntry
  // laid out on the thread stack.
 53   void verify_cookie() {
 54     assert(cookie == COOKIE_VALUE, "Bad cookie: %#x, expected: %#x", cookie, COOKIE_VALUE);
 55   }
 56 #endif
 57 
 58  public:
 59   static int _return_pc_offset; // friend gen_continuation_enter
 60   static int _thaw_call_pc_offset;
 61   static int _cleanup_offset;
 62 
 63   static void set_enter_code(nmethod* nm, int interpreted_entry_offset);
 64   static void set_yield_code(nmethod* nm);
 65   static bool is_interpreted_call(address call_address);
 66 
 67  private:
 68   static address _return_pc;
 69   static address _thaw_call_pc;
 70   static address _cleanup_pc;
 71   static nmethod* _enter_special;
 72   static nmethod* _do_yield;
 73   static int _interpreted_entry_offset;
 74 
 75  private:
 76   ContinuationEntry* _parent;
 77   oopDesc* _cont;
 78   oopDesc* _chunk;
 79   int _flags;
 80   // Size in words of the stack arguments of the bottom frame on stack if compiled 0 otherwise.
 81   // The caller (if there is one) is the still frozen top frame in the StackChunk.
 82   int _argsize;
 83   intptr_t* _parent_cont_fastpath;
 84 #ifdef _LP64
 85   int64_t   _parent_held_monitor_count;
 86 #else
 87   int32_t   _parent_held_monitor_count;
 88 #endif
 89   uint32_t _pin_count;
 90 
 91  public:
 92   static ByteSize parent_offset()   { return byte_offset_of(ContinuationEntry, _parent); }

100 
101   static address return_pc() { return _return_pc; }
102   static address return_pc_address() { return (address)&_return_pc; }
103 
104  public:
105   static size_t size() { return align_up((int)sizeof(ContinuationEntry), 2*wordSize); }
106 
107   ContinuationEntry* parent() const { return _parent; }
108   int64_t parent_held_monitor_count() const { return (int64_t)_parent_held_monitor_count; }
109 
110   static address entry_pc() { return _return_pc; }
111   intptr_t* entry_sp() const { return (intptr_t*)this; }
112   intptr_t* entry_fp() const;
113 
114   static address thaw_call_pc_address() { return (address)&_thaw_call_pc; }
115   static address cleanup_pc() { return _cleanup_pc; }
116 
117   static address compiled_entry();
118   static address interpreted_entry();
119 
  // Returns the nmethod for the Continuation yield intrinsic.
  // Asserts that it has already been installed (see set_yield_code()).
 120   static nmethod* do_yield_nmethod() {
 121     assert(_do_yield != nullptr, "oops");
 122     return _do_yield;
 123   }
124 
125   int argsize() const { return _argsize; }
126   void set_argsize(int value) { _argsize = value; }
127 
128   bool is_pinned() { return _pin_count > 0; }
  // Increments the pin count for this continuation entry.
  // Returns false (and leaves the count unchanged) if the count is already
  // saturated at UINT32_MAX; returns true on success.
 129   bool pin() {
 130     if (_pin_count == UINT32_MAX) return false;
 131     _pin_count++;
 132     return true;
 133   }
  // Decrements the pin count. Returns false on underflow, i.e. an unpin
  // without a matching successful pin(); returns true on success.
 134   bool unpin() {
 135     if (_pin_count == 0) return false;
 136     _pin_count--;
 137     return true;
 138   }
139 
140   intptr_t* parent_cont_fastpath() const { return _parent_cont_fastpath; }
141   void set_parent_cont_fastpath(intptr_t* x) { _parent_cont_fastpath = x; }
142 
143   static ContinuationEntry* from_frame(const frame& f);
144   frame to_frame() const;
145   void update_register_map(RegisterMap* map) const;
146   void flush_stack_processing(JavaThread* thread) const;
147 
148   inline intptr_t* bottom_sender_sp() const;
149   inline oop cont_oop(const JavaThread* thread) const;
150   inline oop scope(const JavaThread* thread) const;
151   inline static oop cont_oop_or_null(const ContinuationEntry* ce, const JavaThread* thread);
152 
153   oop* cont_addr() { return (oop*)&_cont; }
154   oop* chunk_addr() { return (oop*)&_chunk; }
155 
156   bool is_virtual_thread() const { return _flags != 0; }
157 
158 #ifndef PRODUCT
159   void describe(FrameValues& values, int frame_no) const;
160 #endif
161 
162 #ifdef ASSERT
163   static bool assert_entry_frame_laid_out(JavaThread* thread, bool preempted = false);
164 #endif
165 };
166 
167 #endif // SHARE_VM_RUNTIME_CONTINUATIONENTRY_HPP
< prev index next >