27 #include "classfile/vmSymbols.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/nmethod.hpp"
31 #include "code/pcDesc.hpp"
32 #include "code/scopeDesc.hpp"
33 #include "code/vtableStubs.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/g1/g1HeapRegion.hpp"
37 #include "gc/shared/barrierSet.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gcLocker.hpp"
40 #include "interpreter/bytecode.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/linkResolver.hpp"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/oopFactory.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/objArrayKlass.hpp"
48 #include "oops/klass.inline.hpp"
49 #include "oops/oop.inline.hpp"
50 #include "oops/typeArrayOop.inline.hpp"
51 #include "opto/ad.hpp"
52 #include "opto/addnode.hpp"
53 #include "opto/callnode.hpp"
54 #include "opto/cfgnode.hpp"
55 #include "opto/graphKit.hpp"
56 #include "opto/machnode.hpp"
57 #include "opto/matcher.hpp"
58 #include "opto/memnode.hpp"
59 #include "opto/mulnode.hpp"
60 #include "opto/output.hpp"
61 #include "opto/runtime.hpp"
62 #include "opto/subnode.hpp"
63 #include "prims/jvmtiExport.hpp"
64 #include "runtime/atomic.hpp"
65 #include "runtime/frame.inline.hpp"
66 #include "runtime/handles.inline.hpp"
227 oopDesc* dest, jint dest_pos,
228 jint length, JavaThread* thread) {
229 SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
230 }
231
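// Thin wrapper for C2-generated calls; the real work is done by the shared
// runtime implementation.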
232 void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
233 SharedRuntime::complete_monitor_locking_C(obj, lock, current);
234 }
235
236
237 //=============================================================================
238 // Opto compiler runtime routines
239 //=============================================================================
240
241
242 //=============================allocation======================================
243 // We failed the fast-path allocation. Now we need to do a scavenge or GC
244 // and try allocation again.
245
246 // object allocation
247 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
248 JRT_BLOCK;
249 #ifndef PRODUCT
250 SharedRuntime::_new_instance_ctr++; // new instance requires GC
251 #endif
252 assert(check_compiled_frame(current), "incorrect caller");
253
254 // These checks are cheap to make and support reflective allocation.
255 int lh = klass->layout_helper();
256 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
257 Handle holder(current, klass->klass_holder()); // keep the klass alive
258 klass->check_valid_for_instantiation(false, THREAD);
259 if (!HAS_PENDING_EXCEPTION) {
260 InstanceKlass::cast(klass)->initialize(THREAD);
261 }
262 }
263
264 if (!HAS_PENDING_EXCEPTION) {
265 // Scavenge and allocate an instance.
266 Handle holder(current, klass->klass_holder()); // keep the klass alive
267 oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
268 current->set_vm_result(result);
269
270 // Pass oops back through thread local storage. Our apparent type to Java
271 // is that we return an oop, but we can block on exit from this routine and
272 // a GC can trash the oop in C's return register. The generated stub will
273 // fetch the oop from TLS after any possible GC.
274 }
275
276 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
277 JRT_BLOCK_END;
278
279 // inform GC that we won't do card marks for initializing writes.
280 SharedRuntime::on_slowpath_allocation_exit(current);
281 JRT_END
282
283
284 // array allocation
285 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
286 JRT_BLOCK;
287 #ifndef PRODUCT
288 SharedRuntime::_new_array_ctr++; // new array requires GC
289 #endif
290 assert(check_compiled_frame(current), "incorrect caller");
291
292   // Scavenge and allocate an array.
293 oop result;
294
295 if (array_type->is_typeArray_klass()) {
296 // The oopFactory likes to work with the element type.
297 // (We could bypass the oopFactory, since it doesn't add much value.)
298 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
299 result = oopFactory::new_typeArray(elem_type, len, THREAD);
300 } else {
301 // Although the oopFactory likes to work with the elem_type,
302 // the compiler prefers the array_type, since it must already have
303 // that latter value in hand for the fast path.
304 Handle holder(current, array_type->klass_holder()); // keep the array klass alive
305 Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
306 result = oopFactory::new_objArray(elem_type, len, THREAD);
307 }
308
309 // Pass oops back through thread local storage. Our apparent type to Java
310 // is that we return an oop, but we can block on exit from this routine and
311 // a GC can trash the oop in C's return register. The generated stub will
312 // fetch the oop from TLS after any possible GC.
313 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
314 current->set_vm_result(result);
315 JRT_BLOCK_END;
316
317 // inform GC that we won't do card marks for initializing writes.
318 SharedRuntime::on_slowpath_allocation_exit(current);
319 JRT_END
320
321 // array allocation without zeroing
322 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
323 JRT_BLOCK;
324 #ifndef PRODUCT
325 SharedRuntime::_new_array_ctr++; // new array requires GC
326 #endif
483 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))
484
485 if (!SafepointSynchronize::is_synchronizing() ) {
486 if (ObjectSynchronizer::quick_notify(obj, current, true)) {
487 return;
488 }
489 }
490
491   // This is a case the fast-path above isn't provisioned to handle.
492   // The fast-path is designed to handle the frequently arising cases efficiently
493   // (it is just a degenerate variant of the slow-path).
494   // Perform the dreaded state transition and pass control to the slow-path.
495 JRT_BLOCK;
496 Handle h_obj(current, obj);
497 ObjectSynchronizer::notifyall(h_obj, CHECK);
498 JRT_BLOCK_END;
499 JRT_END
500
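// The *_Type() builders below give C2 the call signatures of the runtime
// entries above: the domain tuple lists the arguments and the range tuple
// lists the returned values. Returned oops travel back through thread-local
// storage (see the allocation entries above), so a raw pointer stands in for
// them in the range.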
501 const TypeFunc *OptoRuntime::new_instance_Type() {
502 // create input type (domain)
503 const Type **fields = TypeTuple::fields(1);
504 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
505 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
506
507 // create result type (range)
508 fields = TypeTuple::fields(1);
509 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
510
511 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
512
513 return TypeFunc::make(domain, range);
514 }
515
516 #if INCLUDE_JVMTI
517 const TypeFunc *OptoRuntime::notify_jvmti_vthread_Type() {
518 // create input type (domain)
519 const Type **fields = TypeTuple::fields(2);
520 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
521 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean
522 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
523
524 // no result type needed
525 fields = TypeTuple::fields(1);
623 fields = TypeTuple::fields(0);
624 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
625
626 return TypeFunc::make(domain, range);
627 }
628
629 //-----------------------------------------------------------------------------
630 // Monitor Handling
631 const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
632 // create input type (domain)
633 const Type **fields = TypeTuple::fields(2);
634 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
635 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
636 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
637
638 // create result type (range)
639 fields = TypeTuple::fields(0);
640
641 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
642
643 return TypeFunc::make(domain,range);
644 }
645
646 const TypeFunc *OptoRuntime::complete_monitor_locking_Type() {
647 return complete_monitor_enter_Type();
648 }
649
650 //-----------------------------------------------------------------------------
651 const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
652 // create input type (domain)
653 const Type **fields = TypeTuple::fields(3);
654 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
655 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
656 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
657 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
658
659 // create result type (range)
660 fields = TypeTuple::fields(0);
661
662 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
663
1786 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1787   frame caller_frame = stub_frame.sender(&reg_map);
1788
1789 // Deoptimize the caller frame.
1790 Deoptimization::deoptimize_frame(thread, caller_frame.id());
1791 }
1792
1793
1794 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
1795 // Called from within the owner thread, so no need for safepoint
1796 RegisterMap reg_map(thread,
1797 RegisterMap::UpdateMap::include,
1798 RegisterMap::ProcessFrames::include,
1799 RegisterMap::WalkContinuation::skip);
1800 frame stub_frame = thread->last_frame();
1801 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1802   frame caller_frame = stub_frame.sender(&reg_map);
1803 return caller_frame.is_deoptimized_frame();
1804 }
1805
1806
1807 const TypeFunc *OptoRuntime::register_finalizer_Type() {
1808 // create input type (domain)
1809 const Type **fields = TypeTuple::fields(1);
1810 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
1811 // // The JavaThread* is passed to each routine as the last argument
1812 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
1813 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1814
1815 // create result type (range)
1816 fields = TypeTuple::fields(0);
1817
1818 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1819
1820 return TypeFunc::make(domain,range);
1821 }
1822
1823 #if INCLUDE_JFR
1824 const TypeFunc *OptoRuntime::class_id_load_barrier_Type() {
1825 // create input type (domain)
1826 const Type **fields = TypeTuple::fields(1);
1827 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
1828 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
1829
1830 // create result type (range)
1831 fields = TypeTuple::fields(0);
1832
1833 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
1834
1835 return TypeFunc::make(domain,range);
1836 }
1837 #endif
1838
1839 //-----------------------------------------------------------------------------
1840 // Dtrace support. Entry and exit probes have the same signature.
1841 const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() {
1842 // create input type (domain)
1843 const Type **fields = TypeTuple::fields(2);
1844 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1845 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
1846 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1847
1848 // create result type (range)
1849 fields = TypeTuple::fields(0);
1850
1851 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1852
1853 return TypeFunc::make(domain,range);
1854 }
1855
1856 const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
1857 // create input type (domain)
1858 const Type **fields = TypeTuple::fields(2);
1859 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1860 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
1861
1862 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1863
1864 // create result type (range)
1865 fields = TypeTuple::fields(0);
1866
1867 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1868
1869 return TypeFunc::make(domain,range);
1870 }
1871
1872
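// Called from compiled code when a newly allocated object's class has a
// finalizer; registers the object so its finalizer will eventually run.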
1873 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
1874 assert(oopDesc::is_oop(obj), "must be a valid oop");
1875 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1876 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1877 JRT_END
1878
1879 //-----------------------------------------------------------------------------
1880
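// Head of the linked list of NamedCounters (lock and eliminated-lock
// statistics); print_named_counters() below dumps them.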
1881 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
1882
1883 //
1884 // Dump the collected NamedCounters.
1885 //
1886 void OptoRuntime::print_named_counters() {
1887 int total_lock_count = 0;
1888 int eliminated_lock_count = 0;
1889
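// Print a one-line description of an in-flight exception: the exception oop,
// the method or code blob containing the faulting pc, and the pc itself.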
1959 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
1960 trace_exception_counter++;
1961 stringStream tempst;
1962
1963 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
1964 exception_oop->print_value_on(&tempst);
1965 tempst.print(" in ");
1966 CodeBlob* blob = CodeCache::find_blob(exception_pc);
1967 if (blob->is_nmethod()) {
1968 blob->as_nmethod()->method()->print_value_on(&tempst);
1969 } else if (blob->is_runtime_stub()) {
1970 tempst.print("<runtime-stub>");
1971 } else {
1972 tempst.print("<unknown>");
1973 }
1974 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
1975 tempst.print("]");
1976
1977 st->print_raw_cr(tempst.freeze());
1978 }
|
27 #include "classfile/vmSymbols.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/nmethod.hpp"
31 #include "code/pcDesc.hpp"
32 #include "code/scopeDesc.hpp"
33 #include "code/vtableStubs.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/g1/g1HeapRegion.hpp"
37 #include "gc/shared/barrierSet.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gcLocker.hpp"
40 #include "interpreter/bytecode.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/linkResolver.hpp"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/oopFactory.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/flatArrayKlass.hpp"
48 #include "oops/flatArrayOop.inline.hpp"
49 #include "oops/objArrayKlass.hpp"
50 #include "oops/klass.inline.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "oops/typeArrayOop.inline.hpp"
53 #include "opto/ad.hpp"
54 #include "opto/addnode.hpp"
55 #include "opto/callnode.hpp"
56 #include "opto/cfgnode.hpp"
57 #include "opto/graphKit.hpp"
58 #include "opto/machnode.hpp"
59 #include "opto/matcher.hpp"
60 #include "opto/memnode.hpp"
61 #include "opto/mulnode.hpp"
62 #include "opto/output.hpp"
63 #include "opto/runtime.hpp"
64 #include "opto/subnode.hpp"
65 #include "prims/jvmtiExport.hpp"
66 #include "runtime/atomic.hpp"
67 #include "runtime/frame.inline.hpp"
68 #include "runtime/handles.inline.hpp"
229 oopDesc* dest, jint dest_pos,
230 jint length, JavaThread* thread) {
231 SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
232 }
233
234 void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
235 SharedRuntime::complete_monitor_locking_C(obj, lock, current);
236 }
237
238
239 //=============================================================================
240 // Opto compiler runtime routines
241 //=============================================================================
242
243
244 //=============================allocation======================================
245 // We failed the fast-path allocation. Now we need to do a scavenge or GC
246 // and try allocation again.
247
248 // object allocation
249 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, bool is_larval, JavaThread* current))
250 JRT_BLOCK;
251 #ifndef PRODUCT
252 SharedRuntime::_new_instance_ctr++; // new instance requires GC
253 #endif
254 assert(check_compiled_frame(current), "incorrect caller");
255
256 // These checks are cheap to make and support reflective allocation.
257 int lh = klass->layout_helper();
258 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
259 Handle holder(current, klass->klass_holder()); // keep the klass alive
260 klass->check_valid_for_instantiation(false, THREAD);
261 if (!HAS_PENDING_EXCEPTION) {
262 InstanceKlass::cast(klass)->initialize(THREAD);
263 }
264 }
265
266 if (!HAS_PENDING_EXCEPTION) {
267 // Scavenge and allocate an instance.
268 Handle holder(current, klass->klass_holder()); // keep the klass alive
269 instanceOop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
270 if (is_larval) {
271 // Check if this is a larval buffer allocation
272 result->set_mark(result->mark().enter_larval_state());
273 }
274 current->set_vm_result(result);
275
276 // Pass oops back through thread local storage. Our apparent type to Java
277 // is that we return an oop, but we can block on exit from this routine and
278 // a GC can trash the oop in C's return register. The generated stub will
279 // fetch the oop from TLS after any possible GC.
280 }
281
282 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
283 JRT_BLOCK_END;
284
285 // inform GC that we won't do card marks for initializing writes.
286 SharedRuntime::on_slowpath_allocation_exit(current);
287 JRT_END
288
289
290 // array allocation
291 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
292 JRT_BLOCK;
293 #ifndef PRODUCT
294 SharedRuntime::_new_array_ctr++; // new array requires GC
295 #endif
296 assert(check_compiled_frame(current), "incorrect caller");
297
299   // Scavenge and allocate an array.
299 oop result;
300
301 if (array_type->is_flatArray_klass()) {
302 Klass* elem_type = FlatArrayKlass::cast(array_type)->element_klass();
303 result = oopFactory::new_valueArray(elem_type, len, THREAD);
304 } else if (array_type->is_typeArray_klass()) {
305 // The oopFactory likes to work with the element type.
306 // (We could bypass the oopFactory, since it doesn't add much value.)
307 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
308 result = oopFactory::new_typeArray(elem_type, len, THREAD);
309 } else {
310 Handle holder(current, array_type->klass_holder()); // keep the array klass alive
311 result = ObjArrayKlass::cast(array_type)->allocate(len, THREAD);
312 }
313
314 // Pass oops back through thread local storage. Our apparent type to Java
315 // is that we return an oop, but we can block on exit from this routine and
316 // a GC can trash the oop in C's return register. The generated stub will
317 // fetch the oop from TLS after any possible GC.
318 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
319 current->set_vm_result(result);
320 JRT_BLOCK_END;
321
322 // inform GC that we won't do card marks for initializing writes.
323 SharedRuntime::on_slowpath_allocation_exit(current);
324 JRT_END
325
326 // array allocation without zeroing
327 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
328 JRT_BLOCK;
329 #ifndef PRODUCT
330 SharedRuntime::_new_array_ctr++; // new array requires GC
331 #endif
488 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))
489
490 if (!SafepointSynchronize::is_synchronizing() ) {
491 if (ObjectSynchronizer::quick_notify(obj, current, true)) {
492 return;
493 }
494 }
495
496   // This is a case the fast-path above isn't provisioned to handle.
497   // The fast-path is designed to handle the frequently arising cases efficiently
498   // (it is just a degenerate variant of the slow-path).
499   // Perform the dreaded state transition and pass control to the slow-path.
500 JRT_BLOCK;
501 Handle h_obj(current, obj);
502 ObjectSynchronizer::notifyall(h_obj, CHECK);
503 JRT_BLOCK_END;
504 JRT_END
505
506 const TypeFunc *OptoRuntime::new_instance_Type() {
507 // create input type (domain)
508 const Type **fields = TypeTuple::fields(2);
509 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
510 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // is_larval
511 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
512
513 // create result type (range)
514 fields = TypeTuple::fields(1);
515 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
516
517 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
518
519 return TypeFunc::make(domain, range);
520 }
521
522 #if INCLUDE_JVMTI
523 const TypeFunc *OptoRuntime::notify_jvmti_vthread_Type() {
524 // create input type (domain)
525 const Type **fields = TypeTuple::fields(2);
526 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
527 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean
528 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
529
530 // no result type needed
531 fields = TypeTuple::fields(1);
629 fields = TypeTuple::fields(0);
630 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
631
632 return TypeFunc::make(domain, range);
633 }
634
635 //-----------------------------------------------------------------------------
636 // Monitor Handling
637 const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
638 // create input type (domain)
639 const Type **fields = TypeTuple::fields(2);
640 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
641 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
642 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
643
644 // create result type (range)
645 fields = TypeTuple::fields(0);
646
647 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
648
649 return TypeFunc::make(domain, range);
650 }
651
652 const TypeFunc *OptoRuntime::complete_monitor_locking_Type() {
653 return complete_monitor_enter_Type();
654 }
655
656 //-----------------------------------------------------------------------------
657 const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
658 // create input type (domain)
659 const Type **fields = TypeTuple::fields(3);
660 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
661 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
662 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
663 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
664
665 // create result type (range)
666 fields = TypeTuple::fields(0);
667
668 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
669
1792 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1793   frame caller_frame = stub_frame.sender(&reg_map);
1794
1795 // Deoptimize the caller frame.
1796 Deoptimization::deoptimize_frame(thread, caller_frame.id());
1797 }
1798
1799
1800 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
1801 // Called from within the owner thread, so no need for safepoint
1802 RegisterMap reg_map(thread,
1803 RegisterMap::UpdateMap::include,
1804 RegisterMap::ProcessFrames::include,
1805 RegisterMap::WalkContinuation::skip);
1806 frame stub_frame = thread->last_frame();
1807 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1808   frame caller_frame = stub_frame.sender(&reg_map);
1809 return caller_frame.is_deoptimized_frame();
1810 }
1811
1812 const TypeFunc *OptoRuntime::register_finalizer_Type() {
1813 // create input type (domain)
1814 const Type **fields = TypeTuple::fields(1);
1815 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
1816 // // The JavaThread* is passed to each routine as the last argument
1817 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
1818 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1819
1820 // create result type (range)
1821 fields = TypeTuple::fields(0);
1822
1823 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1824
1825 return TypeFunc::make(domain, range);
1826 }
1827
1828 #if INCLUDE_JFR
1829 const TypeFunc *OptoRuntime::class_id_load_barrier_Type() {
1830 // create input type (domain)
1831 const Type **fields = TypeTuple::fields(1);
1832 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
1833 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
1834
1835 // create result type (range)
1836 fields = TypeTuple::fields(0);
1837
1838 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
1839
1840 return TypeFunc::make(domain,range);
1841 }
1842 #endif
1843
1844 //-----------------------------------------------------------------------------
1845 // Dtrace support. Entry and exit probes have the same signature.
1846 const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() {
1847 // create input type (domain)
1848 const Type **fields = TypeTuple::fields(2);
1849 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1850 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
1851 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1852
1853 // create result type (range)
1854 fields = TypeTuple::fields(0);
1855
1856 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1857
1858 return TypeFunc::make(domain, range);
1859 }
1860
1861 const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
1862 // create input type (domain)
1863 const Type **fields = TypeTuple::fields(2);
1864 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1865 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
1866
1867 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1868
1869 // create result type (range)
1870 fields = TypeTuple::fields(0);
1871
1872 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1873
1874 return TypeFunc::make(domain, range);
1875 }
1876
1877
1878 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
1879 assert(oopDesc::is_oop(obj), "must be a valid oop");
1880 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1881 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1882 JRT_END
1883
1884 //-----------------------------------------------------------------------------
1885
1886 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
1887
1888 //
1889 // Dump the collected NamedCounters.
1890 //
1891 void OptoRuntime::print_named_counters() {
1892 int total_lock_count = 0;
1893 int eliminated_lock_count = 0;
1894
1964 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
1965 trace_exception_counter++;
1966 stringStream tempst;
1967
1968 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
1969 exception_oop->print_value_on(&tempst);
1970 tempst.print(" in ");
1971 CodeBlob* blob = CodeCache::find_blob(exception_pc);
1972 if (blob->is_nmethod()) {
1973 blob->as_nmethod()->method()->print_value_on(&tempst);
1974 } else if (blob->is_runtime_stub()) {
1975 tempst.print("<runtime-stub>");
1976 } else {
1977 tempst.print("<unknown>");
1978 }
1979 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
1980 tempst.print("]");
1981
1982 st->print_raw_cr(tempst.freeze());
1983 }
1984
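// Signatures for the inline-type (value object) runtime entries below. The
// number and types of returned fields aren't known here, so the domains
// conservatively cover every register the return convention can use.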
1985 const TypeFunc *OptoRuntime::store_inline_type_fields_Type() {
1986 // create input type (domain)
1987 uint total = SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2;
1988 const Type **fields = TypeTuple::fields(total);
1989 // We don't know the number of returned values and their
1990 // types. Assume all registers available to the return convention
1991 // are used.
1992 fields[TypeFunc::Parms] = TypePtr::BOTTOM;
1993 uint i = 1;
1994 for (; i < SharedRuntime::java_return_convention_max_int; i++) {
1995 fields[TypeFunc::Parms+i] = TypeInt::INT;
1996 }
1997 for (; i < total; i+=2) {
1998 fields[TypeFunc::Parms+i] = Type::DOUBLE;
1999 fields[TypeFunc::Parms+i+1] = Type::HALF;
2000 }
2001 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields);
2002
2003 // create result type (range)
2004 fields = TypeTuple::fields(1);
2005 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
2006
2007 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields);
2008
2009 return TypeFunc::make(domain, range);
2010 }
2011
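// Same register-conservative shape as store_inline_type_fields_Type() above,
// with an additional leading raw-pointer argument.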
2012 const TypeFunc *OptoRuntime::pack_inline_type_Type() {
2013 // create input type (domain)
2014 uint total = 1 + SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2;
2015 const Type **fields = TypeTuple::fields(total);
2016 // We don't know the number of returned values and their
2017 // types. Assume all registers available to the return convention
2018 // are used.
2019 fields[TypeFunc::Parms] = TypeRawPtr::BOTTOM;
2020 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;
2021 uint i = 2;
2022 for (; i < SharedRuntime::java_return_convention_max_int+1; i++) {
2023 fields[TypeFunc::Parms+i] = TypeInt::INT;
2024 }
2025 for (; i < total; i+=2) {
2026 fields[TypeFunc::Parms+i] = Type::DOUBLE;
2027 fields[TypeFunc::Parms+i+1] = Type::HALF;
2028 }
2029 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields);
2030
2031 // create result type (range)
2032 fields = TypeTuple::fields(1);
2033 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
2034
2035 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields);
2036
2037 return TypeFunc::make(domain, range);
2038 }
2039
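// Slow path for reading an element of a flat array whose layout the compiled
// caller doesn't know statically: buffer (heap-allocate) a copy of the element
// and return the buffered oop through thread-local storage.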
2040 JRT_BLOCK_ENTRY(void, OptoRuntime::load_unknown_inline_C(flatArrayOopDesc* array, int index, JavaThread* current))
2041 JRT_BLOCK;
2042 flatArrayHandle vah(current, array);
2043 oop buffer = flatArrayOopDesc::value_alloc_copy_from_index(vah, index, THREAD);
2044 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
2045 current->set_vm_result(buffer);
2046 JRT_BLOCK_END;
2047 JRT_END
2048
2049 const TypeFunc* OptoRuntime::load_unknown_inline_Type() {
2050 // create input type (domain)
2051 const Type** fields = TypeTuple::fields(2);
2052 fields[TypeFunc::Parms] = TypeOopPtr::NOTNULL;
2053 fields[TypeFunc::Parms+1] = TypeInt::POS;
2054
2055 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);
2056
2057 // create result type (range)
2058 fields = TypeTuple::fields(1);
2059 fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL;
2060
2061 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
2062
2063 return TypeFunc::make(domain, range);
2064 }
2065
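// Matching slow path for the store: copy the buffered value into the flat
// array element at the given index.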
2066 JRT_LEAF(void, OptoRuntime::store_unknown_inline_C(instanceOopDesc* buffer, flatArrayOopDesc* array, int index))
2067 {
2068 assert(buffer != nullptr, "can't store null into flat array");
2069 array->value_copy_to_index(buffer, index, LayoutKind::PAYLOAD); // Temporary hack for the transition
2070 }
2071 JRT_END
2072
2073 const TypeFunc* OptoRuntime::store_unknown_inline_Type() {
2074 // create input type (domain)
2075 const Type** fields = TypeTuple::fields(3);
2076 fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL;
2077 fields[TypeFunc::Parms+1] = TypeOopPtr::NOTNULL;
2078 fields[TypeFunc::Parms+2] = TypeInt::POS;
2079
2080 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields);
2081
2082 // create result type (range)
2083 fields = TypeTuple::fields(0);
2084 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
2085
2086 return TypeFunc::make(domain, range);
2087 }
|