1 /*
2 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
27 #include "classfile/vmSymbols.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/nmethod.hpp"
31 #include "code/pcDesc.hpp"
32 #include "code/scopeDesc.hpp"
33 #include "code/vtableStubs.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/g1/g1HeapRegion.hpp"
37 #include "gc/shared/barrierSet.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gcLocker.hpp"
40 #include "interpreter/bytecode.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/linkResolver.hpp"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/oopFactory.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/objArrayKlass.hpp"
48 #include "oops/klass.inline.hpp"
49 #include "oops/oop.inline.hpp"
50 #include "oops/typeArrayOop.inline.hpp"
51 #include "opto/ad.hpp"
52 #include "opto/addnode.hpp"
53 #include "opto/callnode.hpp"
54 #include "opto/cfgnode.hpp"
55 #include "opto/graphKit.hpp"
56 #include "opto/machnode.hpp"
57 #include "opto/matcher.hpp"
58 #include "opto/memnode.hpp"
59 #include "opto/mulnode.hpp"
60 #include "opto/output.hpp"
61 #include "opto/runtime.hpp"
62 #include "opto/subnode.hpp"
63 #include "prims/jvmtiExport.hpp"
64 #include "runtime/atomic.hpp"
65 #include "runtime/frame.inline.hpp"
66 #include "runtime/handles.inline.hpp"
227 oopDesc* dest, jint dest_pos,
228 jint length, JavaThread* thread) {
229 SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
230 }
231
232 void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
233 SharedRuntime::complete_monitor_locking_C(obj, lock, current);
234 }
235
236
237 //=============================================================================
238 // Opto compiler runtime routines
239 //=============================================================================
240
241
242 //=============================allocation======================================
243 // We failed the fast-path allocation. Now we need to do a scavenge or GC
244 // and try allocation again.
245
246 // object allocation
247 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
248 JRT_BLOCK;
249 #ifndef PRODUCT
250 SharedRuntime::_new_instance_ctr++; // new instance requires GC
251 #endif
252 assert(check_compiled_frame(current), "incorrect caller");
253
254 // These checks are cheap to make and support reflective allocation.
255 int lh = klass->layout_helper();
256 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
257 Handle holder(current, klass->klass_holder()); // keep the klass alive
258 klass->check_valid_for_instantiation(false, THREAD);
259 if (!HAS_PENDING_EXCEPTION) {
260 InstanceKlass::cast(klass)->initialize(THREAD);
261 }
262 }
263
264 if (!HAS_PENDING_EXCEPTION) {
265 // Scavenge and allocate an instance.
266 Handle holder(current, klass->klass_holder()); // keep the klass alive
267 oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
268 current->set_vm_result(result);
269
270 // Pass oops back through thread local storage. Our apparent type to Java
271 // is that we return an oop, but we can block on exit from this routine and
272 // a GC can trash the oop in C's return register. The generated stub will
273 // fetch the oop from TLS after any possible GC.
274 }
275
276 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
277 JRT_BLOCK_END;
278
279 // inform GC that we won't do card marks for initializing writes.
280 SharedRuntime::on_slowpath_allocation_exit(current);
281 JRT_END
282
283
284 // array allocation
285 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
286 JRT_BLOCK;
287 #ifndef PRODUCT
288 SharedRuntime::_new_array_ctr++; // new array requires GC
289 #endif
290 assert(check_compiled_frame(current), "incorrect caller");
291
292   // Scavenge and allocate an array.
293 oop result;
294
295 if (array_type->is_typeArray_klass()) {
296 // The oopFactory likes to work with the element type.
297 // (We could bypass the oopFactory, since it doesn't add much value.)
298 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
299 result = oopFactory::new_typeArray(elem_type, len, THREAD);
300 } else {
301 // Although the oopFactory likes to work with the elem_type,
302 // the compiler prefers the array_type, since it must already have
303 // that latter value in hand for the fast path.
304 Handle holder(current, array_type->klass_holder()); // keep the array klass alive
305 Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
306 result = oopFactory::new_objArray(elem_type, len, THREAD);
307 }
308
309 // Pass oops back through thread local storage. Our apparent type to Java
310 // is that we return an oop, but we can block on exit from this routine and
311 // a GC can trash the oop in C's return register. The generated stub will
312 // fetch the oop from TLS after any possible GC.
313 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
314 current->set_vm_result(result);
315 JRT_BLOCK_END;
316
317 // inform GC that we won't do card marks for initializing writes.
318 SharedRuntime::on_slowpath_allocation_exit(current);
319 JRT_END
320
321 // array allocation without zeroing
322 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
323 JRT_BLOCK;
324 #ifndef PRODUCT
325 SharedRuntime::_new_array_ctr++; // new array requires GC
326 #endif
483 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))
484
485 if (!SafepointSynchronize::is_synchronizing() ) {
486 if (ObjectSynchronizer::quick_notify(obj, current, true)) {
487 return;
488 }
489 }
490
491 // This is the case the fast-path above isn't provisioned to handle.
492 // The fast-path is designed to handle frequently arising cases in an efficient manner.
493 // (The fast-path is just a degenerate variant of the slow-path).
494 // Perform the dreaded state transition and pass control into the slow-path.
495 JRT_BLOCK;
496 Handle h_obj(current, obj);
497 ObjectSynchronizer::notifyall(h_obj, CHECK);
498 JRT_BLOCK_END;
499 JRT_END
500
501 const TypeFunc *OptoRuntime::new_instance_Type() {
502 // create input type (domain)
503 const Type **fields = TypeTuple::fields(1);
504 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
505 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
506
507 // create result type (range)
508 fields = TypeTuple::fields(1);
509 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
510
511 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
512
513 return TypeFunc::make(domain, range);
514 }
515
516 #if INCLUDE_JVMTI
517 const TypeFunc *OptoRuntime::notify_jvmti_vthread_Type() {
518 // create input type (domain)
519 const Type **fields = TypeTuple::fields(2);
520 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
521 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean
522 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
523
524 // no result type needed
525 fields = TypeTuple::fields(1);
623 fields = TypeTuple::fields(0);
624 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
625
626 return TypeFunc::make(domain, range);
627 }
628
629 //-----------------------------------------------------------------------------
630 // Monitor Handling
631 const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
632 // create input type (domain)
633 const Type **fields = TypeTuple::fields(2);
634 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
635 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
636 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
637
638 // create result type (range)
639 fields = TypeTuple::fields(0);
640
641 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
642
643 return TypeFunc::make(domain,range);
644 }
645
646 const TypeFunc *OptoRuntime::complete_monitor_locking_Type() {
647 return complete_monitor_enter_Type();
648 }
649
650 //-----------------------------------------------------------------------------
651 const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
652 // create input type (domain)
653 const Type **fields = TypeTuple::fields(3);
654 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
655 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
656 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
657 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
658
659 // create result type (range)
660 fields = TypeTuple::fields(0);
661
662 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
663
1786 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1787   frame caller_frame = stub_frame.sender(&reg_map);
1788
1789 // Deoptimize the caller frame.
1790 Deoptimization::deoptimize_frame(thread, caller_frame.id());
1791 }
1792
1793
1794 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
1795 // Called from within the owner thread, so no need for safepoint
1796 RegisterMap reg_map(thread,
1797 RegisterMap::UpdateMap::include,
1798 RegisterMap::ProcessFrames::include,
1799 RegisterMap::WalkContinuation::skip);
1800 frame stub_frame = thread->last_frame();
1801 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1802   frame caller_frame = stub_frame.sender(&reg_map);
1803 return caller_frame.is_deoptimized_frame();
1804 }
1805
1806
1807 const TypeFunc *OptoRuntime::register_finalizer_Type() {
1808 // create input type (domain)
1809 const Type **fields = TypeTuple::fields(1);
1810 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
1811 // // The JavaThread* is passed to each routine as the last argument
1812 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
1813 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1814
1815 // create result type (range)
1816 fields = TypeTuple::fields(0);
1817
1818 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1819
1820 return TypeFunc::make(domain,range);
1821 }
1822
1823 #if INCLUDE_JFR
1824 const TypeFunc *OptoRuntime::class_id_load_barrier_Type() {
1825 // create input type (domain)
1826 const Type **fields = TypeTuple::fields(1);
1827 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
1828 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
1829
1830 // create result type (range)
1831 fields = TypeTuple::fields(0);
1832
1833 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
1834
1835 return TypeFunc::make(domain,range);
1836 }
1837 #endif
1838
1839 //-----------------------------------------------------------------------------
1840 // Dtrace support. entry and exit probes have the same signature
1841 const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() {
1842 // create input type (domain)
1843 const Type **fields = TypeTuple::fields(2);
1844 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1845 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
1846 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1847
1848 // create result type (range)
1849 fields = TypeTuple::fields(0);
1850
1851 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1852
1853 return TypeFunc::make(domain,range);
1854 }
1855
1856 const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
1857 // create input type (domain)
1858 const Type **fields = TypeTuple::fields(2);
1859 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1860 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
1861
1862 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1863
1864 // create result type (range)
1865 fields = TypeTuple::fields(0);
1866
1867 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1868
1869 return TypeFunc::make(domain,range);
1870 }
1871
1872
1873 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
1874 assert(oopDesc::is_oop(obj), "must be a valid oop");
1875 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1876 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1877 JRT_END
1878
1879 //-----------------------------------------------------------------------------
1880
1881 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
1882
1883 //
1884 // dump the collected NamedCounters.
1885 //
1886 void OptoRuntime::print_named_counters() {
1887 int total_lock_count = 0;
1888 int eliminated_lock_count = 0;
1889
1959 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
1960 trace_exception_counter++;
1961 stringStream tempst;
1962
1963 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
1964 exception_oop->print_value_on(&tempst);
1965 tempst.print(" in ");
1966 CodeBlob* blob = CodeCache::find_blob(exception_pc);
1967 if (blob->is_nmethod()) {
1968 blob->as_nmethod()->method()->print_value_on(&tempst);
1969 } else if (blob->is_runtime_stub()) {
1970 tempst.print("<runtime-stub>");
1971 } else {
1972 tempst.print("<unknown>");
1973 }
1974 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
1975 tempst.print("]");
1976
1977 st->print_raw_cr(tempst.freeze());
1978 }
1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
27 #include "classfile/vmSymbols.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/nmethod.hpp"
31 #include "code/pcDesc.hpp"
32 #include "code/scopeDesc.hpp"
33 #include "code/vtableStubs.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/g1/g1HeapRegion.hpp"
37 #include "gc/shared/barrierSet.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gcLocker.hpp"
40 #include "interpreter/bytecode.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/linkResolver.hpp"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/oopFactory.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/flatArrayKlass.hpp"
48 #include "oops/flatArrayOop.inline.hpp"
49 #include "oops/objArrayKlass.hpp"
50 #include "oops/klass.inline.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "oops/typeArrayOop.inline.hpp"
53 #include "opto/ad.hpp"
54 #include "opto/addnode.hpp"
55 #include "opto/callnode.hpp"
56 #include "opto/cfgnode.hpp"
57 #include "opto/graphKit.hpp"
58 #include "opto/machnode.hpp"
59 #include "opto/matcher.hpp"
60 #include "opto/memnode.hpp"
61 #include "opto/mulnode.hpp"
62 #include "opto/output.hpp"
63 #include "opto/runtime.hpp"
64 #include "opto/subnode.hpp"
65 #include "prims/jvmtiExport.hpp"
66 #include "runtime/atomic.hpp"
67 #include "runtime/frame.inline.hpp"
68 #include "runtime/handles.inline.hpp"
229 oopDesc* dest, jint dest_pos,
230 jint length, JavaThread* thread) {
231 SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
232 }
233
234 void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
235 SharedRuntime::complete_monitor_locking_C(obj, lock, current);
236 }
237
238
239 //=============================================================================
240 // Opto compiler runtime routines
241 //=============================================================================
242
243
244 //=============================allocation======================================
245 // We failed the fast-path allocation. Now we need to do a scavenge or GC
246 // and try allocation again.
247
248 // object allocation
249 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, bool is_larval, JavaThread* current))
250 JRT_BLOCK;
251 #ifndef PRODUCT
252 SharedRuntime::_new_instance_ctr++; // new instance requires GC
253 #endif
254 assert(check_compiled_frame(current), "incorrect caller");
255
256 // These checks are cheap to make and support reflective allocation.
257 int lh = klass->layout_helper();
258 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
259 Handle holder(current, klass->klass_holder()); // keep the klass alive
260 klass->check_valid_for_instantiation(false, THREAD);
261 if (!HAS_PENDING_EXCEPTION) {
262 InstanceKlass::cast(klass)->initialize(THREAD);
263 }
264 }
265
266 if (!HAS_PENDING_EXCEPTION) {
267 // Scavenge and allocate an instance.
268 Handle holder(current, klass->klass_holder()); // keep the klass alive
269 instanceOop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
270 if (is_larval) {
271       // This is a larval buffer allocation: mark the new instance as being in its larval state.
272 result->set_mark(result->mark().enter_larval_state());
273 }
274 current->set_vm_result(result);
275
276 // Pass oops back through thread local storage. Our apparent type to Java
277 // is that we return an oop, but we can block on exit from this routine and
278 // a GC can trash the oop in C's return register. The generated stub will
279 // fetch the oop from TLS after any possible GC.
280 }
281
282 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
283 JRT_BLOCK_END;
284
285 // inform GC that we won't do card marks for initializing writes.
286 SharedRuntime::on_slowpath_allocation_exit(current);
287 JRT_END
288
289
290 // array allocation
291 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
292 JRT_BLOCK;
293 #ifndef PRODUCT
294 SharedRuntime::_new_array_ctr++; // new array requires GC
295 #endif
296 assert(check_compiled_frame(current), "incorrect caller");
297
298   // Scavenge and allocate an array.
299 oop result;
300
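// Flat arrays store their element payloads inline in the array body, so allocation goes
// through the oopFactory with the element klass and the array's flat layout kind.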
301 if (array_type->is_flatArray_klass()) {
302 Handle holder(current, array_type->klass_holder()); // keep the array klass alive
303 FlatArrayKlass* fak = FlatArrayKlass::cast(array_type);
304 Klass* elem_type = fak->element_klass();
305 result = oopFactory::new_flatArray(elem_type, len, fak->layout_kind(), THREAD);
306 } else if (array_type->is_typeArray_klass()) {
307 // The oopFactory likes to work with the element type.
308 // (We could bypass the oopFactory, since it doesn't add much value.)
309 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
310 result = oopFactory::new_typeArray(elem_type, len, THREAD);
311 } else {
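// Regular object array: the compiler already has the array_type in hand for the fast path,
// so allocate directly through the ObjArrayKlass.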
312 Handle holder(current, array_type->klass_holder()); // keep the array klass alive
313 result = ObjArrayKlass::cast(array_type)->allocate(len, THREAD);
314 }
315
316 // Pass oops back through thread local storage. Our apparent type to Java
317 // is that we return an oop, but we can block on exit from this routine and
318 // a GC can trash the oop in C's return register. The generated stub will
319 // fetch the oop from TLS after any possible GC.
320 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
321 current->set_vm_result(result);
322 JRT_BLOCK_END;
323
324 // inform GC that we won't do card marks for initializing writes.
325 SharedRuntime::on_slowpath_allocation_exit(current);
326 JRT_END
327
328 // array allocation without zeroing
329 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
330 JRT_BLOCK;
331 #ifndef PRODUCT
332 SharedRuntime::_new_array_ctr++; // new array requires GC
333 #endif
490 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))
491
492 if (!SafepointSynchronize::is_synchronizing() ) {
493 if (ObjectSynchronizer::quick_notify(obj, current, true)) {
494 return;
495 }
496 }
497
498 // This is the case the fast-path above isn't provisioned to handle.
499 // The fast-path is designed to handle frequently arising cases in an efficient manner.
500 // (The fast-path is just a degenerate variant of the slow-path).
501 // Perform the dreaded state transition and pass control into the slow-path.
502 JRT_BLOCK;
503 Handle h_obj(current, obj);
504 ObjectSynchronizer::notifyall(h_obj, CHECK);
505 JRT_BLOCK_END;
506 JRT_END
507
508 const TypeFunc *OptoRuntime::new_instance_Type() {
509 // create input type (domain)
510 const Type **fields = TypeTuple::fields(2);
511 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
512 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // is_larval
513 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
514
515 // create result type (range)
516 fields = TypeTuple::fields(1);
517 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
518
519 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
520
521 return TypeFunc::make(domain, range);
522 }
523
524 #if INCLUDE_JVMTI
525 const TypeFunc *OptoRuntime::notify_jvmti_vthread_Type() {
526 // create input type (domain)
527 const Type **fields = TypeTuple::fields(2);
528 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
529 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean
530 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
531
532 // no result type needed
533 fields = TypeTuple::fields(1);
631 fields = TypeTuple::fields(0);
632 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
633
634 return TypeFunc::make(domain, range);
635 }
636
637 //-----------------------------------------------------------------------------
638 // Monitor Handling
639 const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
640 // create input type (domain)
641 const Type **fields = TypeTuple::fields(2);
642 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
643 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
644 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
645
646 // create result type (range)
647 fields = TypeTuple::fields(0);
648
649 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
650
651 return TypeFunc::make(domain, range);
652 }
653
654 const TypeFunc *OptoRuntime::complete_monitor_locking_Type() {
655 return complete_monitor_enter_Type();
656 }
657
658 //-----------------------------------------------------------------------------
659 const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
660 // create input type (domain)
661 const Type **fields = TypeTuple::fields(3);
662 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
663 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
664 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
665 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
666
667 // create result type (range)
668 fields = TypeTuple::fields(0);
669
670 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
671
1794 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1795   frame caller_frame = stub_frame.sender(&reg_map);
1796
1797 // Deoptimize the caller frame.
1798 Deoptimization::deoptimize_frame(thread, caller_frame.id());
1799 }
1800
1801
1802 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
1803 // Called from within the owner thread, so no need for safepoint
1804 RegisterMap reg_map(thread,
1805 RegisterMap::UpdateMap::include,
1806 RegisterMap::ProcessFrames::include,
1807 RegisterMap::WalkContinuation::skip);
1808 frame stub_frame = thread->last_frame();
1809 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1810   frame caller_frame = stub_frame.sender(&reg_map);
1811 return caller_frame.is_deoptimized_frame();
1812 }
1813
1814 const TypeFunc *OptoRuntime::register_finalizer_Type() {
1815 // create input type (domain)
1816 const Type **fields = TypeTuple::fields(1);
1817 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
1818 // // The JavaThread* is passed to each routine as the last argument
1819 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
1820 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1821
1822 // create result type (range)
1823 fields = TypeTuple::fields(0);
1824
1825 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1826
1827 return TypeFunc::make(domain, range);
1828 }
1829
1830 #if INCLUDE_JFR
1831 const TypeFunc *OptoRuntime::class_id_load_barrier_Type() {
1832 // create input type (domain)
1833 const Type **fields = TypeTuple::fields(1);
1834 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
1835 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
1836
1837 // create result type (range)
1838 fields = TypeTuple::fields(0);
1839
1840 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
1841
1842 return TypeFunc::make(domain,range);
1843 }
1844 #endif
1845
1846 //-----------------------------------------------------------------------------
1847 // Dtrace support. entry and exit probes have the same signature
1848 const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() {
1849 // create input type (domain)
1850 const Type **fields = TypeTuple::fields(2);
1851 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1852 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
1853 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1854
1855 // create result type (range)
1856 fields = TypeTuple::fields(0);
1857
1858 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1859
1860 return TypeFunc::make(domain, range);
1861 }
1862
1863 const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
1864 // create input type (domain)
1865 const Type **fields = TypeTuple::fields(2);
1866 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1867 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
1868
1869 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1870
1871 // create result type (range)
1872 fields = TypeTuple::fields(0);
1873
1874 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1875
1876 return TypeFunc::make(domain, range);
1877 }
1878
1879
1880 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
1881 assert(oopDesc::is_oop(obj), "must be a valid oop");
1882 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1883 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1884 JRT_END
1885
1886 //-----------------------------------------------------------------------------
1887
1888 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
1889
1890 //
1891 // dump the collected NamedCounters.
1892 //
1893 void OptoRuntime::print_named_counters() {
1894 int total_lock_count = 0;
1895 int eliminated_lock_count = 0;
1896
1966 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
1967 trace_exception_counter++;
1968 stringStream tempst;
1969
1970 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
1971 exception_oop->print_value_on(&tempst);
1972 tempst.print(" in ");
1973 CodeBlob* blob = CodeCache::find_blob(exception_pc);
1974 if (blob->is_nmethod()) {
1975 blob->as_nmethod()->method()->print_value_on(&tempst);
1976 } else if (blob->is_runtime_stub()) {
1977 tempst.print("<runtime-stub>");
1978 } else {
1979 tempst.print("<unknown>");
1980 }
1981 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
1982 tempst.print("]");
1983
1984 st->print_raw_cr(tempst.freeze());
1985 }
1986
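// Signature for the slow path that stores the fields of an inline type returned in registers
// into a heap buffer; since the number and types of the returned values are not known here,
// the domain covers every register of the Java return convention.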
1987 const TypeFunc *OptoRuntime::store_inline_type_fields_Type() {
1988 // create input type (domain)
1989 uint total = SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2;
1990 const Type **fields = TypeTuple::fields(total);
1991 // We don't know the number of returned values and their
1992 // types. Assume all registers available to the return convention
1993 // are used.
1994 fields[TypeFunc::Parms] = TypePtr::BOTTOM;
1995 uint i = 1;
1996 for (; i < SharedRuntime::java_return_convention_max_int; i++) {
1997 fields[TypeFunc::Parms+i] = TypeInt::INT;
1998 }
1999 for (; i < total; i+=2) {
2000 fields[TypeFunc::Parms+i] = Type::DOUBLE;
2001 fields[TypeFunc::Parms+i+1] = Type::HALF;
2002 }
2003 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields);
2004
2005 // create result type (range)
2006 fields = TypeTuple::fields(1);
2007 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
2008
2009 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields);
2010
2011 return TypeFunc::make(domain, range);
2012 }
2013
2014 const TypeFunc *OptoRuntime::pack_inline_type_Type() {
2015 // create input type (domain)
2016 uint total = 1 + SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2;
2017 const Type **fields = TypeTuple::fields(total);
2018 // We don't know the number of returned values and their
2019 // types. Assume all registers available to the return convention
2020 // are used.
2021 fields[TypeFunc::Parms] = TypeRawPtr::BOTTOM;
2022 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;
2023 uint i = 2;
2024 for (; i < SharedRuntime::java_return_convention_max_int+1; i++) {
2025 fields[TypeFunc::Parms+i] = TypeInt::INT;
2026 }
2027 for (; i < total; i+=2) {
2028 fields[TypeFunc::Parms+i] = Type::DOUBLE;
2029 fields[TypeFunc::Parms+i+1] = Type::HALF;
2030 }
2031 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields);
2032
2033 // create result type (range)
2034 fields = TypeTuple::fields(1);
2035 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
2036
2037 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields);
2038
2039 return TypeFunc::make(domain, range);
2040 }
2041
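// Slow path for loading an element from a flat array whose exact element layout is not
// statically known to the compiler: buffer the flattened value on the heap and hand the
// resulting oop back through TLS (vm_result), like the allocation slow paths above.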
2042 JRT_BLOCK_ENTRY(void, OptoRuntime::load_unknown_inline_C(flatArrayOopDesc* array, int index, JavaThread* current))
2043 JRT_BLOCK;
2044 oop buffer = array->read_value_from_flat_array(index, THREAD);
2045 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
2046 current->set_vm_result(buffer);
2047 JRT_BLOCK_END;
2048 JRT_END
2049
2050 const TypeFunc* OptoRuntime::load_unknown_inline_Type() {
2051 // create input type (domain)
2052 const Type** fields = TypeTuple::fields(2);
2053 fields[TypeFunc::Parms] = TypeOopPtr::NOTNULL;
2054 fields[TypeFunc::Parms+1] = TypeInt::POS;
2055
2056 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);
2057
2058 // create result type (range)
2059 fields = TypeTuple::fields(1);
2060 fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL;
2061
2062 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
2063
2064 return TypeFunc::make(domain, range);
2065 }
2066
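// Slow path for storing a buffered inline-type value into an element of a flat array whose
// exact element layout is not statically known to the compiler.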
2067 JRT_BLOCK_ENTRY(void, OptoRuntime::store_unknown_inline_C(instanceOopDesc* buffer, flatArrayOopDesc* array, int index, JavaThread* current))
2068 JRT_BLOCK;
2069 assert(buffer != nullptr, "can't store null into flat array");
2070 array->write_value_to_flat_array(buffer, index, THREAD);
2071 if (HAS_PENDING_EXCEPTION) {
2072 fatal("This entry must be changed to be a non-leaf entry because writing to a flat array can now throw an exception");
2073 }
2074 JRT_BLOCK_END;
2075 JRT_END
2076
2077 const TypeFunc* OptoRuntime::store_unknown_inline_Type() {
2078 // create input type (domain)
2079 const Type** fields = TypeTuple::fields(3);
2080 fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL;
2081 fields[TypeFunc::Parms+1] = TypeOopPtr::NOTNULL;
2082 fields[TypeFunc::Parms+2] = TypeInt::POS;
2083
2084 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields);
2085
2086 // create result type (range)
2087 fields = TypeTuple::fields(0);
2088 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
2089
2090 return TypeFunc::make(domain, range);
2091 }