1 /*
  2  * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  */
 23 
 24 #include "precompiled.hpp"
 25 #include "classfile/classLoaderData.hpp"
 26 #include "gc/shared/gc_globals.hpp"
 27 #include "gc/z/zAddress.inline.hpp"
 28 #include "gc/z/zHeap.inline.hpp"
 29 #include "gc/z/zNMethod.hpp"
 30 #include "gc/z/zOop.hpp"
 31 #include "gc/z/zPageAllocator.hpp"
 32 #include "gc/z/zResurrection.hpp"
 33 #include "gc/z/zRootsIterator.hpp"
 34 #include "gc/z/zStackWatermark.hpp"
 35 #include "gc/z/zStat.hpp"
 36 #include "gc/z/zVerify.hpp"
 37 #include "memory/iterator.inline.hpp"
 38 #include "memory/resourceArea.hpp"
 39 #include "oops/oop.hpp"
 40 #include "runtime/frame.inline.hpp"
 41 #include "runtime/globals.hpp"
 42 #include "runtime/handles.hpp"
 43 #include "runtime/safepoint.hpp"
 44 #include "runtime/stackFrameStream.inline.hpp"
 45 #include "runtime/stackWatermark.inline.hpp"
 46 #include "runtime/stackWatermarkSet.inline.hpp"
 47 #include "runtime/thread.hpp"
 48 #include "utilities/debug.hpp"
 49 #include "utilities/globalDefinitions.hpp"
 50 #include "utilities/preserveException.hpp"
 51 
// Format string + arguments for the guarantee() failure messages below.
#define BAD_OOP_ARG(o, p)   "Bad oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(o), p2i(p)
 53 
 54 static void z_verify_oop(oop* p) {
 55   const oop o = RawAccess<>::oop_load(p);
 56   if (o != NULL) {
 57     const uintptr_t addr = ZOop::to_address(o);
 58     guarantee(ZAddress::is_good(addr), BAD_OOP_ARG(o, p));
 59     guarantee(oopDesc::is_oop(ZOop::from_address(addr)), BAD_OOP_ARG(o, p));
 60   }
 61 }
 62 
 63 static void z_verify_possibly_weak_oop(oop* p) {
 64   const oop o = RawAccess<>::oop_load(p);
 65   if (o != NULL) {
 66     const uintptr_t addr = ZOop::to_address(o);
 67     guarantee(ZAddress::is_good(addr) || ZAddress::is_finalizable_good(addr), BAD_OOP_ARG(o, p));
 68     guarantee(oopDesc::is_oop(ZOop::from_address(ZAddress::good(addr))), BAD_OOP_ARG(o, p));
 69   }
 70 }
 71 
 72 class ZVerifyRootClosure : public OopClosure {
 73 private:
 74   const bool _verify_fixed;
 75 
 76 public:
 77   ZVerifyRootClosure(bool verify_fixed) :
 78       _verify_fixed(verify_fixed) {}
 79 
 80   virtual void do_oop(oop* p) {
 81     if (_verify_fixed) {
 82       z_verify_oop(p);
 83     } else {
 84       // Don't know the state of the oop.
 85       oop obj = *p;
 86       obj = NativeAccess<AS_NO_KEEPALIVE>::oop_load(&obj);
 87       z_verify_oop(&obj);
 88     }
 89   }
 90 
 91   virtual void do_oop(narrowOop*) {
 92     ShouldNotReachHere();
 93   }
 94 
 95   bool verify_fixed() const {
 96     return _verify_fixed;
 97   }
 98 };
 99 
100 class ZVerifyCodeBlobClosure : public CodeBlobToOopClosure {
101 public:
102   ZVerifyCodeBlobClosure(ZVerifyRootClosure* _cl) :
103       CodeBlobToOopClosure(_cl, false /* fix_relocations */) {}
104 
105   virtual void do_code_blob(CodeBlob* cb) {
106     CodeBlobToOopClosure::do_code_blob(cb);
107   }
108 };
109 
// Verifies the oops on a JavaThread's stack against the thread's ZGC stack
// watermark state. Depending on whether watermark processing has started
// and/or completed, frames are expected to contain either good (fixed) or
// bad (not yet fixed) oops.
class ZVerifyStack : public OopClosure {
private:
  ZVerifyRootClosure* const _cl;                    // Underlying oop verifier
  JavaThread*         const _jt;                    // Thread whose stack is walked
  uint64_t                  _last_good;             // sp of the last processed frame (0 if unused)
  bool                      _verifying_bad_frames;  // True once only bad oops are expected

public:
  ZVerifyStack(ZVerifyRootClosure* cl, JavaThread* jt) :
      _cl(cl),
      _jt(jt),
      _last_good(0),
      _verifying_bad_frames(false) {
    ZStackWatermark* const stack_watermark = StackWatermarkSet::get<ZStackWatermark>(jt, StackWatermarkKind::gc);

    if (_cl->verify_fixed()) {
      // Caller claims the whole stack is fixed; the watermark must agree.
      assert(stack_watermark->processing_started(), "Should already have been fixed");
      assert(stack_watermark->processing_completed(), "Should already have been fixed");
    } else {
      // We don't really know the state of the stack, verify watermark.
      if (!stack_watermark->processing_started()) {
        // Nothing processed yet; every frame should contain bad oops.
        _verifying_bad_frames = true;
      } else {
        // Not time yet to verify bad frames
        _last_good = stack_watermark->last_processed();
      }
    }
  }

  void do_oop(oop* p) {
    if (_verifying_bad_frames) {
      // Frames past the last processed one must not contain good oops yet.
      const oop obj = *p;
      guarantee(!ZAddress::is_good(ZOop::to_address(obj)), BAD_OOP_ARG(obj, p));
    }
    _cl->do_oop(p);
  }

  void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }

  // Called after each frame is visited; flips into bad-frame verification
  // mode once the last processed frame has been reached.
  void prepare_next_frame(frame& frame) {
    if (_cl->verify_fixed()) {
      // All frames need to be good
      return;
    }

    // The verification has two modes, depending on whether we have reached the
    // last processed frame or not. Before it is reached, we expect everything to
    // be good. After reaching it, we expect everything to be bad.
    const uintptr_t sp = reinterpret_cast<uintptr_t>(frame.sp());

    if (!_verifying_bad_frames && sp == _last_good) {
      // Found the last good frame, now verify the bad ones
      _verifying_bad_frames = true;
    }
  }

  // Walk every frame on the thread's stack, verifying oops (including those
  // embedded in compiled code) without processing/fixing the frames.
  void verify_frames() {
    ZVerifyCodeBlobClosure cb_cl(_cl);
    for (StackFrameStream frames(_jt, true /* update */, false /* process_frames */);
         !frames.is_done();
         frames.next()) {
      frame& frame = *frames.current();
      frame.oops_do(this, &cb_cl, frames.register_map(), DerivedPointerIterationMode::_ignore);
      prepare_next_frame(frame);
    }
  }
};
179 
180 class ZVerifyOopClosure : public ClaimMetadataVisitingOopIterateClosure {
181 private:
182   const bool _verify_weaks;
183 
184 public:
185   ZVerifyOopClosure(bool verify_weaks) :
186       ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other),
187       _verify_weaks(verify_weaks) {}
188 
189   virtual void do_oop(oop* p) {
190     if (_verify_weaks) {
191       z_verify_possibly_weak_oop(p);
192     } else {
193       // We should never encounter finalizable oops through strong
194       // paths. This assumes we have only visited strong roots.
195       z_verify_oop(p);
196     }
197   }
198 
199   virtual void do_oop(narrowOop* p) {
200     ShouldNotReachHere();
201   }
202 
203   virtual ReferenceIterationMode reference_iteration_mode() {
204     return _verify_weaks ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
205   }
206 
207   virtual void do_method(Method* m) {}
208   virtual void do_nmethod(nmethod* nm) {}
209 };
210 
// CLD closure that applies the root closure without claiming the CLDs.
typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_none> ZVerifyCLDClosure;
212 
213 class ZVerifyThreadClosure : public ThreadClosure {
214 private:
215   ZVerifyRootClosure* const _cl;
216 
217 public:
218   ZVerifyThreadClosure(ZVerifyRootClosure* cl) :
219       _cl(cl) {}
220 
221   virtual void do_thread(Thread* thread) {
222     thread->oops_do_no_frames(_cl, NULL);
223 
224     JavaThread* const jt = JavaThread::cast(thread);
225     if (!jt->has_last_Java_frame()) {
226       return;
227     }
228 
229     ZVerifyStack verify_stack(_cl, jt);
230     verify_stack.verify_frames();
231   }
232 };
233 
// Verifies the oops embedded in nmethods. The nmethod entry barrier is only
// asserted disarmed when the visited nmethods are known to be processed.
class ZVerifyNMethodClosure : public NMethodClosure {
private:
  OopClosure* const        _cl;            // Underlying oop verifier
  BarrierSetNMethod* const _bs_nm;         // nmethod entry barrier support
  const bool               _verify_fixed;  // True if nmethod oops should already be fixed

  bool trust_nmethod_state() const {
    // The root iterator will visit non-processed
    // nmethods when class unloading is turned off.
    return ClassUnloading || _verify_fixed;
  }

public:
  ZVerifyNMethodClosure(OopClosure* cl, bool verify_fixed) :
      _cl(cl),
      _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()),
      _verify_fixed(verify_fixed) {}

  virtual void do_nmethod(nmethod* nm) {
    assert(!trust_nmethod_state() || !_bs_nm->is_armed(nm), "Should not encounter any armed nmethods");

    ZNMethod::nmethod_oops_do(nm, _cl);
  }
};
258 
259 void ZVerify::roots_strong(bool verify_fixed) {
260   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
261   assert(!ZResurrection::is_blocked(), "Invalid phase");
262 
263   ZVerifyRootClosure cl(verify_fixed);
264   ZVerifyCLDClosure cld_cl(&cl);
265   ZVerifyThreadClosure thread_cl(&cl);
266   ZVerifyNMethodClosure nm_cl(&cl, verify_fixed);
267 
268   ZRootsIterator iter(ClassLoaderData::_claim_none);
269   iter.apply(&cl,
270              &cld_cl,
271              &thread_cl,
272              &nm_cl);
273 }
274 
275 void ZVerify::roots_weak() {
276   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
277   assert(!ZResurrection::is_blocked(), "Invalid phase");
278 
279   ZVerifyRootClosure cl(true /* verify_fixed */);
280   ZWeakRootsIterator iter;
281   iter.apply(&cl);
282 }
283 
284 void ZVerify::objects(bool verify_weaks) {
285   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
286   assert(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");
287   assert(!ZResurrection::is_blocked(), "Invalid phase");
288 
289   ZVerifyOopClosure cl(verify_weaks);
290   ObjectToOopClosure object_cl(&cl);
291   ZHeap::heap()->object_iterate(&object_cl, verify_weaks);
292 }
293 
294 void ZVerify::before_zoperation() {
295   // Verify strong roots
296   ZStatTimerDisable disable;
297   if (ZVerifyRoots) {
298     roots_strong(false /* verify_fixed */);
299   }
300 }
301 
302 void ZVerify::after_mark() {
303   // Verify all strong roots and strong references
304   ZStatTimerDisable disable;
305   if (ZVerifyRoots) {
306     roots_strong(true /* verify_fixed */);
307   }
308   if (ZVerifyObjects) {
309     objects(false /* verify_weaks */);
310   }
311 }
312 
313 void ZVerify::after_weak_processing() {
314   // Verify all roots and all references
315   ZStatTimerDisable disable;
316   if (ZVerifyRoots) {
317     roots_strong(true /* verify_fixed */);
318     roots_weak();
319   }
320   if (ZVerifyObjects) {
321     objects(true /* verify_weaks */);
322   }
323 }
324 
325 template <bool Map>
326 class ZPageDebugMapOrUnmapClosure : public ZPageClosure {
327 private:
328   const ZPageAllocator* const _allocator;
329 
330 public:
331   ZPageDebugMapOrUnmapClosure(const ZPageAllocator* allocator) :
332       _allocator(allocator) {}
333 
334   void do_page(const ZPage* page) {
335     if (Map) {
336       _allocator->debug_map_page(page);
337     } else {
338       _allocator->debug_unmap_page(page);
339     }
340   }
341 };
342 
343 ZVerifyViewsFlip::ZVerifyViewsFlip(const ZPageAllocator* allocator) :
344     _allocator(allocator) {
345   if (ZVerifyViews) {
346     // Unmap all pages
347     ZPageDebugMapOrUnmapClosure<false /* Map */> cl(_allocator);
348     ZHeap::heap()->pages_do(&cl);
349   }
350 }
351 
352 ZVerifyViewsFlip::~ZVerifyViewsFlip() {
353   if (ZVerifyViews) {
354     // Map all pages
355     ZPageDebugMapOrUnmapClosure<true /* Map */> cl(_allocator);
356     ZHeap::heap()->pages_do(&cl);
357   }
358 }
359 
360 #ifdef ASSERT
361 
362 class ZVerifyBadOopClosure : public OopClosure {
363 public:
364   virtual void do_oop(oop* p) {
365     const oop o = *p;
366     assert(!ZAddress::is_good(ZOop::to_address(o)), "Should not be good: " PTR_FORMAT, p2i(o));
367   }
368 
369   virtual void do_oop(narrowOop* p) {
370     ShouldNotReachHere();
371   }
372 };
373 
374 // This class encapsulates various marks we need to deal with calling the
375 // frame iteration code from arbitrary points in the runtime. It is mostly
376 // due to problems that we might want to eventually clean up inside of the
377 // frame iteration code, such as creating random handles even though there
378 // is no safepoint to protect against, and fiddling around with exceptions.
class StackWatermarkProcessingMark {
  // NOTE: declaration order matters — these marks are constructed top to
  // bottom and torn down in reverse.
  ResetNoHandleMark     _rnhm;  // Allow handle creation in this scope
  HandleMark            _hm;    // Reclaim handles created below this point
  PreserveExceptionMark _pem;   // Save/restore any pending exception
  ResourceMark          _rm;    // Reclaim resource-area allocations

public:
  StackWatermarkProcessingMark(Thread* thread) :
      _rnhm(),
      _hm(thread),
      _pem(thread),
      _rm(thread) {}
};
392 
393 void ZVerify::verify_frame_bad(const frame& fr, RegisterMap& register_map) {
394   ZVerifyBadOopClosure verify_cl;
395   fr.oops_do(&verify_cl, NULL, &register_map, DerivedPointerIterationMode::_ignore);
396 }
397 
398 void ZVerify::verify_thread_head_bad(JavaThread* jt) {
399   ZVerifyBadOopClosure verify_cl;
400   jt->oops_do_no_frames(&verify_cl, NULL);
401 }
402 
403 void ZVerify::verify_thread_frames_bad(JavaThread* jt) {
404   if (jt->has_last_Java_frame()) {
405     ZVerifyBadOopClosure verify_cl;
406     StackWatermarkProcessingMark swpm(Thread::current());
407     // Traverse the execution stack
408     for (StackFrameStream fst(jt, true /* update */, false /* process_frames */); !fst.is_done(); fst.next()) {
409       fst.current()->oops_do(&verify_cl, NULL /* code_cl */, fst.register_map(), DerivedPointerIterationMode::_ignore);
410     }
411   }
412 }
413 
414 #endif // ASSERT