/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiThreadState.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/vframe.hpp"

// marker for when the stack depth has been reset and is now unknown.
// any negative number would work but small ones might obscure an
// underrun error.
static const int UNKNOWN_STACK_DEPTH = -99;

///////////////////////////////////////////////////////////////
//
// class JvmtiThreadState
//
// Instances of JvmtiThreadState hang off of each thread.
// Thread local storage for JVMTI.
//

JvmtiThreadState *JvmtiThreadState::_head = NULL;
JvmtiThreadState::JvmtiThreadState(JavaThread* thread)
  : _thread_event_enable() {
  assert(JvmtiThreadState_lock->is_locked(), "sanity check");
  _thread               = thread;
  _exception_state      = ES_CLEARED;
  _debuggable           = true;
  _hide_single_stepping = false;
  _hide_level           = 0;
  _pending_step_for_popframe = false;
  _class_being_redefined = NULL;
  _class_load_kind = jvmti_class_load_kind_load;
  _classes_being_redefined = NULL;
  _head_env_thread_state = NULL;
  _dynamic_code_event_collector = NULL;
  _vm_object_alloc_event_collector = NULL;
  _sampled_object_alloc_event_collector = NULL;
  _the_class_for_redefinition_verification = NULL;
  _scratch_class_for_redefinition_verification = NULL;
  _cur_stack_depth = UNKNOWN_STACK_DEPTH;

  // JVMTI ForceEarlyReturn support
  _pending_step_for_earlyret = false;
  _earlyret_state = earlyret_inactive;
  _earlyret_tos = ilgl;
  _earlyret_value.j = 0L;
  _earlyret_oop = NULL;

  _jvmti_event_queue = NULL;

  // add all the JvmtiEnvThreadState to the new JvmtiThreadState
  {
    JvmtiEnvIterator it;
    for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
      if (env->is_valid()) {
        add_env(env);
      }
    }
  }

  // link us into the list
  {
    // The thread state list manipulation code must not have safepoints.
    // See periodic_clean_up().
    debug_only(NoSafepointVerifier nosafepoint;)

    _prev = NULL;
    _next = _head;
    if (_head != NULL) {
      _head->_prev = this;
    }
    _head = this;
  }

  // set this as the state for the thread
  thread->set_jvmti_thread_state(this);
}


JvmtiThreadState::~JvmtiThreadState()   {
  assert(JvmtiThreadState_lock->is_locked(), "sanity check");

  if (_classes_being_redefined != NULL) {
    delete _classes_being_redefined; // free the GrowableArray on C heap
  }

  // clear this as the state for the thread
  get_thread()->set_jvmti_thread_state(NULL);

  // zap our env thread states
  {
    JvmtiEnvBase::entering_dying_thread_env_iteration();
    JvmtiEnvThreadStateIterator it(this);
    for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ) {
      JvmtiEnvThreadState* zap = ets;
      ets = it.next(ets);
      delete zap;
    }
    JvmtiEnvBase::leaving_dying_thread_env_iteration();
  }

  // remove us from the list
  {
    // The thread state list manipulation code must not have safepoints.
    // See periodic_clean_up().
    debug_only(NoSafepointVerifier nosafepoint;)

    if (_prev == NULL) {
      assert(_head == this, "sanity check");
      _head = _next;
    } else {
      assert(_head != this, "sanity check");
      _prev->_next = _next;
    }
    if (_next != NULL) {
      _next->_prev = _prev;
    }
    _next = NULL;
    _prev = NULL;
  }
}


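// Periodic clean up: walk all JvmtiThreadStates at a safepoint and, for each,
// unlink and delete any JvmtiEnvThreadState whose JVMTI environment has been
// disposed and is no longer valid.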
void
JvmtiThreadState::periodic_clean_up() {
  assert(SafepointSynchronize::is_at_safepoint(), "at safepoint");

  // This iteration is initialized with "_head" instead of "JvmtiThreadState::first()"
  // because the latter requires the JvmtiThreadState_lock.
  // This iteration is safe at a safepoint as well, see the NoSafepointVerifier
  // asserts at all list manipulation sites.
  for (JvmtiThreadState *state = _head; state != NULL; state = state->next()) {
    // For each environment thread state corresponding to an invalid environment
    // unlink it from the list and deallocate it.
    JvmtiEnvThreadStateIterator it(state);
    JvmtiEnvThreadState* previous_ets = NULL;
    JvmtiEnvThreadState* ets = it.first();
    while (ets != NULL) {
      if (ets->get_env()->is_valid()) {
        previous_ets = ets;
        ets = it.next(ets);
      } else {
        // This one isn't valid, remove it from the list and deallocate it
        JvmtiEnvThreadState* defunct_ets = ets;
        ets = ets->next();
        if (previous_ets == NULL) {
          assert(state->head_env_thread_state() == defunct_ets, "sanity check");
          state->set_head_env_thread_state(ets);
        } else {
          previous_ets->set_next(ets);
        }
        delete defunct_ets;
      }
    }
  }
}

void JvmtiThreadState::add_env(JvmtiEnvBase *env) {
  assert(JvmtiThreadState_lock->is_locked(), "sanity check");

  JvmtiEnvThreadState *new_ets = new JvmtiEnvThreadState(_thread, env);
  // add this environment thread state to the end of the list (order is important)
  {
    // list deallocation (which occurs at a safepoint) cannot occur simultaneously
    debug_only(NoSafepointVerifier nosafepoint;)

    JvmtiEnvThreadStateIterator it(this);
    JvmtiEnvThreadState* previous_ets = NULL;
    for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
      previous_ets = ets;
    }
    if (previous_ets == NULL) {
      set_head_env_thread_state(new_ets);
    } else {
      previous_ets->set_next(new_ets);
    }
  }
}


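// Interp-only mode: while the JavaThread's interp_only_mode counter is
// non-zero, the thread executes in the interpreter only, so that
// interpreter-posted JVMTI events such as single step and frame pop can be
// delivered for it.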
void JvmtiThreadState::enter_interp_only_mode() {
  assert(_thread->get_interp_only_mode() == 0, "entering interp only when mode not zero");
  _thread->increment_interp_only_mode();
}


void JvmtiThreadState::leave_interp_only_mode() {
  assert(_thread->get_interp_only_mode() == 1, "leaving interp only when mode not one");
  _thread->decrement_interp_only_mode();
}


// Helper routine used in several places
int JvmtiThreadState::count_frames() {
#ifdef ASSERT
  Thread *current_thread = Thread::current();
#endif
  assert(SafepointSynchronize::is_at_safepoint() ||
         get_thread()->is_handshake_safe_for(current_thread),
         "call by myself / at safepoint / at handshake");

  if (!get_thread()->has_last_Java_frame()) return 0;  // no Java frames

  ResourceMark rm;
  RegisterMap reg_map(get_thread());
  javaVFrame *jvf = get_thread()->last_java_vframe(&reg_map);
  int n = 0;
  while (jvf != NULL) {
    Method* method = jvf->method();
    jvf = jvf->java_sender();
    n++;
  }
  return n;
}

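// The current stack depth is cached in _cur_stack_depth while the thread is
// in interp-only mode. The cached value is adjusted as frames are pushed and
// popped, invalidated to UNKNOWN_STACK_DEPTH when it can no longer be
// trusted, and recomputed from count_frames() on the next call to
// cur_stack_depth().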
void JvmtiThreadState::invalidate_cur_stack_depth() {
  assert(SafepointSynchronize::is_at_safepoint() ||
         get_thread()->is_handshake_safe_for(Thread::current()),
         "bad synchronization with owner thread");

  _cur_stack_depth = UNKNOWN_STACK_DEPTH;
}

void JvmtiThreadState::incr_cur_stack_depth() {
  guarantee(JavaThread::current() == get_thread(), "must be current thread");

  if (!is_interp_only_mode()) {
    _cur_stack_depth = UNKNOWN_STACK_DEPTH;
  }
  if (_cur_stack_depth != UNKNOWN_STACK_DEPTH) {
    ++_cur_stack_depth;
  }
}

void JvmtiThreadState::decr_cur_stack_depth() {
  guarantee(JavaThread::current() == get_thread(), "must be current thread");

  if (!is_interp_only_mode()) {
    _cur_stack_depth = UNKNOWN_STACK_DEPTH;
  }
  if (_cur_stack_depth != UNKNOWN_STACK_DEPTH) {
    --_cur_stack_depth;
    assert(_cur_stack_depth >= 0, "incr/decr_cur_stack_depth mismatch");
  }
}

int JvmtiThreadState::cur_stack_depth() {
  Thread *current = Thread::current();
  guarantee(get_thread()->is_handshake_safe_for(current),
            "must be current thread or direct handshake");

  if (!is_interp_only_mode() || _cur_stack_depth == UNKNOWN_STACK_DEPTH) {
    _cur_stack_depth = count_frames();
  } else {
#ifdef ASSERT
    if (EnableJVMTIStackDepthAsserts) {
      // heavy weight assert
      jint num_frames = count_frames();
      assert(_cur_stack_depth == num_frames, "cur_stack_depth out of sync _cur_stack_depth: %d num_frames: %d",
             _cur_stack_depth, num_frames);
    }
#endif
  }
  return _cur_stack_depth;
}

void JvmtiThreadState::process_pending_step_for_popframe() {
  // We are single stepping as the last part of the PopFrame() dance
  // so we have some housekeeping to do.

  JavaThread *thr = get_thread();
  if (thr->popframe_condition() != JavaThread::popframe_inactive) {
    // If the popframe_condition field is not popframe_inactive, then
    // we missed all of the popframe_field cleanup points:
    //
    // - unpack_frames() was not called (nothing to deopt)
    // - remove_activation_preserving_args_entry() was not called
    //   (did not get suspended in a call_vm() family call and did
    //   not complete a call_vm() family call on the way here)
    thr->clear_popframe_condition();
  }

  // clearing the flag indicates we are done with the PopFrame() dance
  clr_pending_step_for_popframe();

  // If an exception was thrown in this frame, the JVMTI thread state needs
  // to be reset. Single stepping may not get enabled correctly by the agent
  // since the exception state is passed in the MethodExit event, which may
  // be sent at some time in the future. The JDWP agent ignores MethodExit
  // events that are caused by an exception.
  //
  if (is_exception_detected()) {
    clear_exception_state();
  }
  // If a step is pending for popframe then it may not be
  // a repeat step. The new_bci and method_id are the same as the current_bci
  // and current method_id after pop and step for recursive calls.
  // Force the step by clearing the last location.
  JvmtiEnvThreadStateIterator it(this);
  for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
    ets->clear_current_location();
  }
}

// Class:     JvmtiThreadState
// Function:  update_for_pop_top_frame
// Description:
//   This function removes any frame pop notification request for
//   the top frame and invalidates both the current stack depth and
//   all cached frameIDs.
//
// Called by: PopFrame
//
void JvmtiThreadState::update_for_pop_top_frame() {
  if (is_interp_only_mode()) {
    // remove any frame pop notification request for the top frame
    // in any environment
    int popframe_number = cur_stack_depth();
    {
      JvmtiEnvThreadStateIterator it(this);
      for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
        if (ets->is_frame_pop(popframe_number)) {
          ets->clear_frame_pop(popframe_number);
        }
      }
    }
    // force stack depth to be recalculated
    invalidate_cur_stack_depth();
  } else {
    assert(!is_enabled(JVMTI_EVENT_FRAME_POP), "Must have no framepops set");
  }
}

void JvmtiThreadState::process_pending_step_for_earlyret() {
  // We are single stepping as the last part of the ForceEarlyReturn
  // dance so we have some housekeeping to do.

  if (is_earlyret_pending()) {
    // If the earlyret_state field is not earlyret_inactive, then
    // we missed all of the earlyret_field cleanup points:
    //
    // - remove_activation() was not called
    //   (did not get suspended in a call_vm() family call and did
    //   not complete a call_vm() family call on the way here)
    //
    // One legitimate way for us to miss all the cleanup points is
    // if we got here right after handling a compiled return. If that
    // is the case, then we consider our return from compiled code to
    // complete the ForceEarlyReturn request and we clear the condition.
    clr_earlyret_pending();
    set_earlyret_oop(NULL);
    clr_earlyret_value();
  }

  // clearing the flag indicates we are done with
  // the ForceEarlyReturn() dance
  clr_pending_step_for_earlyret();

  // If an exception was thrown in this frame, the JVMTI thread state needs
  // to be reset. Single stepping may not get enabled correctly by the agent
  // since the exception state is passed in the MethodExit event, which may
  // be sent at some time in the future. The JDWP agent ignores MethodExit
  // events that are caused by an exception.
  //
  if (is_exception_detected()) {
    clear_exception_state();
  }
  // If a step is pending for earlyret then it may not be a repeat step.
  // The new_bci and method_id are the same as the current_bci and current
  // method_id after earlyret and step for recursive calls.
  // Force the step by clearing the last location.
  JvmtiEnvThreadStateIterator it(this);
  for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
    ets->clear_current_location();
  }
}

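// GC support: visit the pending ForceEarlyReturn oop and any oops or
// nmethods referenced by events still sitting on the deferred event queue.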
void JvmtiThreadState::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  f->do_oop((oop*) &_earlyret_oop);

  // Keep nmethods from unloading on the event queue
  if (_jvmti_event_queue != NULL) {
    _jvmti_event_queue->oops_do(f, cf);
  }
}

void JvmtiThreadState::nmethods_do(CodeBlobClosure* cf) {
  // Keep nmethods from unloading on the event queue
  if (_jvmti_event_queue != NULL) {
    _jvmti_event_queue->nmethods_do(cf);
  }
}

// Thread local event queue.
void JvmtiThreadState::enqueue_event(JvmtiDeferredEvent* event) {
  if (_jvmti_event_queue == NULL) {
    _jvmti_event_queue = new JvmtiDeferredEventQueue();
  }
  // copy the event
  _jvmti_event_queue->enqueue(*event);
}

void JvmtiThreadState::post_events(JvmtiEnv* env) {
  if (_jvmti_event_queue != NULL) {
    _jvmti_event_queue->post(env);  // deletes each queue node
    delete _jvmti_event_queue;
    _jvmti_event_queue = NULL;
  }
}

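// Run nmethod entry barriers for compiled methods referenced by events still
// on the deferred event queue, keeping those nmethods safe to use when the
// events are eventually posted.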
void JvmtiThreadState::run_nmethod_entry_barriers() {
  if (_jvmti_event_queue != NULL) {
    _jvmti_event_queue->run_nmethod_entry_barriers();
  }
}