1 /*
2 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmClasses.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/compiledMethod.inline.hpp"
30 #include "code/compiledIC.hpp"
31 #include "code/icBuffer.hpp"
32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "code/vtableStubs.hpp"
36 #include "compiler/compileBroker.hpp"
37 #include "compiler/oopMap.hpp"
38 #include "gc/g1/heapRegion.hpp"
39 #include "gc/shared/barrierSet.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/gcLocker.hpp"
42 #include "interpreter/bytecode.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/linkResolver.hpp"
45 #include "logging/log.hpp"
46 #include "logging/logStream.hpp"
47 #include "memory/oopFactory.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "oops/objArrayKlass.hpp"
50 #include "oops/klass.inline.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "oops/typeArrayOop.inline.hpp"
53 #include "opto/ad.hpp"
54 #include "opto/addnode.hpp"
55 #include "opto/callnode.hpp"
56 #include "opto/cfgnode.hpp"
57 #include "opto/graphKit.hpp"
58 #include "opto/machnode.hpp"
59 #include "opto/matcher.hpp"
60 #include "opto/memnode.hpp"
61 #include "opto/mulnode.hpp"
62 #include "opto/output.hpp"
63 #include "opto/runtime.hpp"
64 #include "opto/subnode.hpp"
65 #include "prims/jvmtiExport.hpp"
66 #include "runtime/atomic.hpp"
67 #include "runtime/frame.inline.hpp"
68 #include "runtime/handles.inline.hpp"
69 #include "runtime/interfaceSupport.inline.hpp"
70 #include "runtime/javaCalls.hpp"
71 #include "runtime/sharedRuntime.hpp"
72 #include "runtime/signature.hpp"
73 #include "runtime/stackWatermarkSet.hpp"
74 #include "runtime/threadCritical.hpp"
75 #include "runtime/threadWXSetters.inline.hpp"
76 #include "runtime/vframe.hpp"
77 #include "runtime/vframeArray.hpp"
78 #include "runtime/vframe_hp.hpp"
79 #include "utilities/copy.hpp"
80 #include "utilities/preserveException.hpp"
81
82
83 // For debugging purposes:
84 // To force FullGCALot inside a runtime function, add the following two lines
85 //
86 // Universe::release_fullgc_alot_dummy();
87 // MarkSweep::invoke(0, "Debugging");
88 //
89 // At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000
90
91
92
93
94 // Compiled code entry points
95 address OptoRuntime::_new_instance_Java = nullptr;
96 address OptoRuntime::_new_array_Java = nullptr;
97 address OptoRuntime::_new_array_nozero_Java = nullptr;
98 address OptoRuntime::_multianewarray2_Java = nullptr;
99 address OptoRuntime::_multianewarray3_Java = nullptr;
100 address OptoRuntime::_multianewarray4_Java = nullptr;
101 address OptoRuntime::_multianewarray5_Java = nullptr;
102 address OptoRuntime::_multianewarrayN_Java = nullptr;
103 address OptoRuntime::_vtable_must_compile_Java = nullptr;
104 address OptoRuntime::_complete_monitor_locking_Java = nullptr;
105 address OptoRuntime::_monitor_notify_Java = nullptr;
106 address OptoRuntime::_monitor_notifyAll_Java = nullptr;
107 address OptoRuntime::_rethrow_Java = nullptr;
108
109 address OptoRuntime::_slow_arraycopy_Java = nullptr;
110 address OptoRuntime::_register_finalizer_Java = nullptr;
111
112 ExceptionBlob* OptoRuntime::_exception_blob;
113
114 // This should be called in an assertion at the start of OptoRuntime routines
115 // which are entered from compiled code (all of them)
116 #ifdef ASSERT
117 static bool check_compiled_frame(JavaThread* thread) {
118 assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
119 RegisterMap map(thread, false);
120 frame caller = thread->last_frame().sender(&map);
121 assert(caller.is_compiled_frame(), "not being called from compiled code");
122 return true;
123 }
124 #endif // ASSERT
125
126
127 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
128 var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
129 if (var == nullptr) { return false; }
130
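// For reference, the first gen() row below expands (via the macro above) to:
//
//   _new_instance_Java = generate_stub(env, new_instance_Type,
//                                      CAST_FROM_FN_PTR(address, new_instance_C),
//                                      "_new_instance_Java", 0, true, false);
//   if (_new_instance_Java == nullptr) { return false; }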
131 bool OptoRuntime::generate(ciEnv* env) {
132
133 generate_exception_blob();
134
135 // Note: tls means that the return oop is fetched out of thread-local storage
136 //
137 // variable/name type-function-gen , runtime method ,fncy_jp, tls,retpc
138 // -------------------------------------------------------------------------------------------------------------------------------
139 gen(env, _new_instance_Java , new_instance_Type , new_instance_C , 0 , true, false);
140 gen(env, _new_array_Java , new_array_Type , new_array_C , 0 , true, false);
141 gen(env, _new_array_nozero_Java , new_array_Type , new_array_nozero_C , 0 , true, false);
142 gen(env, _multianewarray2_Java , multianewarray2_Type , multianewarray2_C , 0 , true, false);
143 gen(env, _multianewarray3_Java , multianewarray3_Type , multianewarray3_C , 0 , true, false);
144 gen(env, _multianewarray4_Java , multianewarray4_Type , multianewarray4_C , 0 , true, false);
145 gen(env, _multianewarray5_Java , multianewarray5_Type , multianewarray5_C , 0 , true, false);
146 gen(env, _multianewarrayN_Java , multianewarrayN_Type , multianewarrayN_C , 0 , true, false);
147 gen(env, _complete_monitor_locking_Java , complete_monitor_enter_Type , SharedRuntime::complete_monitor_locking_C, 0, false, false);
148 gen(env, _monitor_notify_Java , monitor_notify_Type , monitor_notify_C , 0 , false, false);
149 gen(env, _monitor_notifyAll_Java , monitor_notify_Type , monitor_notifyAll_C , 0 , false, false);
150 gen(env, _rethrow_Java , rethrow_Type , rethrow_C , 2 , true , true );
151
152 gen(env, _slow_arraycopy_Java , slow_arraycopy_Type , SharedRuntime::slow_arraycopy_C , 0 , false, false);
153 gen(env, _register_finalizer_Java , register_finalizer_Type , register_finalizer , 0 , false, false);
154
155 return true;
156 }
157
158 #undef gen
159
160
161 // Helper method to generate RuntimeStubs
162 address OptoRuntime::generate_stub(ciEnv* env,
163 TypeFunc_generator gen, address C_function,
164 const char *name, int is_fancy_jump,
165 bool pass_tls,
166 bool return_pc) {
167
168 // Use the default directive, as we currently have no method to match.
169 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
170 ResourceMark rm;
171 Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls, return_pc, directive);
172 DirectivesStack::release(directive);
173 return C.stub_entry_point();
174 }
175
176 const char* OptoRuntime::stub_name(address entry) {
177 #ifndef PRODUCT
178 CodeBlob* cb = CodeCache::find_blob(entry);
179 RuntimeStub* rs = (RuntimeStub*)cb;
180 assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
181 return rs->name();
182 #else
183 // Fast implementation for product mode (maybe it should be inlined too)
184 return "runtime stub";
185 #endif
186 }
187
188
189 //=============================================================================
190 // Opto compiler runtime routines
191 //=============================================================================
192
193
194 //=============================allocation======================================
195 // We failed the fast-path allocation. Now we need to do a scavenge or GC
196 // and try allocation again.
197
198 // object allocation
199 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
200 JRT_BLOCK;
201 #ifndef PRODUCT
202 SharedRuntime::_new_instance_ctr++; // new instance requires GC
203 #endif
204 assert(check_compiled_frame(current), "incorrect caller");
205
206 // These checks are cheap to make and support reflective allocation.
207 int lh = klass->layout_helper();
208 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
209 Handle holder(current, klass->klass_holder()); // keep the klass alive
210 klass->check_valid_for_instantiation(false, THREAD);
211 if (!HAS_PENDING_EXCEPTION) {
212 InstanceKlass::cast(klass)->initialize(THREAD);
213 }
214 }
215
216 if (!HAS_PENDING_EXCEPTION) {
217 // Scavenge and allocate an instance.
218 Handle holder(current, klass->klass_holder()); // keep the klass alive
219 oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
220 current->set_vm_result(result);
221
222 // Pass oops back through thread local storage. Our apparent type to Java
223 // is that we return an oop, but we can block on exit from this routine and
224 // a GC can trash the oop in C's return register. The generated stub will
225 // fetch the oop from TLS after any possible GC.
226 }
227
228 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
229 JRT_BLOCK_END;
230
231 // inform GC that we won't do card marks for initializing writes.
232 SharedRuntime::on_slowpath_allocation_exit(current);
233 JRT_END
234
235
236 // array allocation
237 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
238 JRT_BLOCK;
239 #ifndef PRODUCT
240 SharedRuntime::_new_array_ctr++; // new array requires GC
241 #endif
242 assert(check_compiled_frame(current), "incorrect caller");
243
244 // Scavenge and allocate an instance.
245 oop result;
246
247 if (array_type->is_typeArray_klass()) {
248 // The oopFactory likes to work with the element type.
249 // (We could bypass the oopFactory, since it doesn't add much value.)
250 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
251 result = oopFactory::new_typeArray(elem_type, len, THREAD);
252 } else {
253 // Although the oopFactory likes to work with the elem_type,
254 // the compiler prefers the array_type, since it must already have
255 // that latter value in hand for the fast path.
256 Handle holder(current, array_type->klass_holder()); // keep the array klass alive
257 Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
258 result = oopFactory::new_objArray(elem_type, len, THREAD);
259 }
260
261 // Pass oops back through thread local storage. Our apparent type to Java
262 // is that we return an oop, but we can block on exit from this routine and
263 // a GC can trash the oop in C's return register. The generated stub will
264 // fetch the oop from TLS after any possible GC.
265 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
266 current->set_vm_result(result);
267 JRT_BLOCK_END;
268
269 // inform GC that we won't do card marks for initializing writes.
270 SharedRuntime::on_slowpath_allocation_exit(current);
271 JRT_END
272
273 // array allocation without zeroing
274 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
275 JRT_BLOCK;
276 #ifndef PRODUCT
277 SharedRuntime::_new_array_ctr++; // new array requires GC
278 #endif
279 assert(check_compiled_frame(current), "incorrect caller");
280
281 // Scavenge and allocate an instance.
282 oop result;
283
284 assert(array_type->is_typeArray_klass(), "should be called only for type array");
285 // The oopFactory likes to work with the element type.
286 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
287 result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);
288
289 // Pass oops back through thread local storage. Our apparent type to Java
290 // is that we return an oop, but we can block on exit from this routine and
291 // a GC can trash the oop in C's return register. The generated stub will
292 // fetch the oop from TLS after any possible GC.
293 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
294 current->set_vm_result(result);
295 JRT_BLOCK_END;
296
297
298 // inform GC that we won't do card marks for initializing writes.
299 SharedRuntime::on_slowpath_allocation_exit(current);
300
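// new_array_nozero_C is intended for compiled callers that initialize every element of the
// array themselves, so the usual zeroing was elided. If that caller has since been
// deoptimized, the interpreter can no longer guarantee this, so the array body (past the
// header) must be re-zeroed below before the array becomes observable.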
301 oop result = current->vm_result();
302 if ((len > 0) && (result != nullptr) &&
303 is_deoptimized_caller_frame(current)) {
304 // Zero array here if the caller is deoptimized.
305 int size = TypeArrayKlass::cast(array_type)->oop_size(result);
306 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
307 size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
308 assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
309 HeapWord* obj = cast_from_oop<HeapWord*>(result);
310 if (!is_aligned(hs_bytes, BytesPerLong)) {
311 *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
312 hs_bytes += BytesPerInt;
313 }
314
315 // Optimized zeroing.
316 assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
317 const size_t aligned_hs = hs_bytes / BytesPerLong;
318 Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
319 }
320
321 JRT_END
322
323 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
324
325 // multianewarray for 2 dimensions
326 JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
327 #ifndef PRODUCT
328 SharedRuntime::_multi2_ctr++; // multianewarray for 2 dimensions
329 #endif
330 assert(check_compiled_frame(current), "incorrect caller");
331 assert(elem_type->is_klass(), "not a class");
332 jint dims[2];
333 dims[0] = len1;
334 dims[1] = len2;
335 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
336 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
337 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
338 current->set_vm_result(obj);
339 JRT_END
340
341 // multianewarray for 3 dimensions
342 JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
343 #ifndef PRODUCT
344 SharedRuntime::_multi3_ctr++; // multianewarray for 3 dimensions
345 #endif
346 assert(check_compiled_frame(current), "incorrect caller");
347 assert(elem_type->is_klass(), "not a class");
348 jint dims[3];
349 dims[0] = len1;
350 dims[1] = len2;
351 dims[2] = len3;
352 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
353 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
354 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
355 current->set_vm_result(obj);
356 JRT_END
357
358 // multianewarray for 4 dimensions
359 JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
360 #ifndef PRODUCT
361 SharedRuntime::_multi4_ctr++; // multianewarray for 4 dimensions
362 #endif
363 assert(check_compiled_frame(current), "incorrect caller");
364 assert(elem_type->is_klass(), "not a class");
365 jint dims[4];
366 dims[0] = len1;
367 dims[1] = len2;
368 dims[2] = len3;
369 dims[3] = len4;
370 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
371 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
372 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
373 current->set_vm_result(obj);
374 JRT_END
375
376 // multianewarray for 5 dimensions
377 JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
378 #ifndef PRODUCT
379 SharedRuntime::_multi5_ctr++; // multianewarray for 5 dimensions
380 #endif
381 assert(check_compiled_frame(current), "incorrect caller");
382 assert(elem_type->is_klass(), "not a class");
383 jint dims[5];
384 dims[0] = len1;
385 dims[1] = len2;
386 dims[2] = len3;
387 dims[3] = len4;
388 dims[4] = len5;
389 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
390 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
391 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
392 current->set_vm_result(obj);
393 JRT_END
394
395 JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
396 assert(check_compiled_frame(current), "incorrect caller");
397 assert(elem_type->is_klass(), "not a class");
398 assert(oop(dims)->is_typeArray(), "not an array");
399
400 ResourceMark rm;
401 jint len = dims->length();
402 assert(len > 0, "Dimensions array should contain data");
403 jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
404 ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
405 c_dims, len);
406
407 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
408 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
409 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
410 current->set_vm_result(obj);
411 JRT_END
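// Note: every multianewarray entry above funnels into ArrayKlass::multi_allocate(); the
// fixed-arity 2..5 dimension variants exist so compiled code can pass the dimensions as plain
// call arguments instead of first materializing an int array for them.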
412
413 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))
414
415 // Very few notify/notifyAll operations find any threads on the waitset, so
416 // the dominant fast-path is to simply return.
417 // Relatedly, it's critical that notify/notifyAll be fast in order to
418 // reduce lock hold times.
419 if (!SafepointSynchronize::is_synchronizing()) {
420 if (ObjectSynchronizer::quick_notify(obj, current, false)) {
421 return;
422 }
423 }
424
425 // This is the case the fast-path above isn't provisioned to handle.
426 // The fast-path is designed to handle frequently arising cases in an efficient manner.
427 // (The fast-path is just a degenerate variant of the slow-path).
428 // Perform the dreaded state transition and pass control into the slow-path.
429 JRT_BLOCK;
430 Handle h_obj(current, obj);
431 ObjectSynchronizer::notify(h_obj, CHECK);
432 JRT_BLOCK_END;
433 JRT_END
434
435 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))
436
437 if (!SafepointSynchronize::is_synchronizing() ) {
438 if (ObjectSynchronizer::quick_notify(obj, current, true)) {
439 return;
440 }
441 }
442
443 // This is the case the fast-path above isn't provisioned to handle.
444 // The fast-path is designed to handle frequently arising cases in an efficient manner.
445 // (The fast-path is just a degenerate variant of the slow-path).
446 // Perform the dreaded state transition and pass control into the slow-path.
447 JRT_BLOCK;
448 Handle h_obj(current, obj);
449 ObjectSynchronizer::notifyall(h_obj, CHECK);
450 JRT_BLOCK_END;
451 JRT_END
452
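// The *_Type() functions below build the C2 call signatures (TypeFunc) for these runtime
// entries: 'domain' describes the argument tuple and 'range' the result tuple, both indexed
// from TypeFunc::Parms onward (the slots below Parms carry the control, I/O, memory, frame
// pointer and return address edges).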
453 const TypeFunc *OptoRuntime::new_instance_Type() {
454 // create input type (domain)
455 const Type **fields = TypeTuple::fields(1);
456 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
457 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
458
459 // create result type (range)
460 fields = TypeTuple::fields(1);
461 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
462
463 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
464
465 return TypeFunc::make(domain, range);
466 }
467
468
469 const TypeFunc *OptoRuntime::athrow_Type() {
470 // create input type (domain)
471 const Type **fields = TypeTuple::fields(1);
472 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop to be thrown
473 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
474
475 // create result type (range)
476 fields = TypeTuple::fields(0);
477
478 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
479
480 return TypeFunc::make(domain, range);
481 }
482
483
484 const TypeFunc *OptoRuntime::new_array_Type() {
485 // create input type (domain)
486 const Type **fields = TypeTuple::fields(2);
487 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
488 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size
489 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
490
491 // create result type (range)
492 fields = TypeTuple::fields(1);
493 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
494
495 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
496
497 return TypeFunc::make(domain, range);
498 }
499
500 const TypeFunc *OptoRuntime::multianewarray_Type(int ndim) {
501 // create input type (domain)
502 const int nargs = ndim + 1;
503 const Type **fields = TypeTuple::fields(nargs);
504 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
505 for( int i = 1; i < nargs; i++ )
506 fields[TypeFunc::Parms + i] = TypeInt::INT; // array size
507 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);
508
509 // create result type (range)
510 fields = TypeTuple::fields(1);
511 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
512 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
513
514 return TypeFunc::make(domain, range);
515 }
516
517 const TypeFunc *OptoRuntime::multianewarray2_Type() {
518 return multianewarray_Type(2);
519 }
520
521 const TypeFunc *OptoRuntime::multianewarray3_Type() {
522 return multianewarray_Type(3);
523 }
524
525 const TypeFunc *OptoRuntime::multianewarray4_Type() {
526 return multianewarray_Type(4);
527 }
528
529 const TypeFunc *OptoRuntime::multianewarray5_Type() {
530 return multianewarray_Type(5);
531 }
532
533 const TypeFunc *OptoRuntime::multianewarrayN_Type() {
534 // create input type (domain)
535 const Type **fields = TypeTuple::fields(2);
536 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
537 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes
538 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
539
540 // create result type (range)
541 fields = TypeTuple::fields(1);
542 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
543 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
544
545 return TypeFunc::make(domain, range);
546 }
547
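// The single int argument of the uncommon trap call below is a packed trap request, assumed
// to be produced by Deoptimization::make_trap_request(reason, action) and unpacked again in
// the uncommon trap handler.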
548 const TypeFunc *OptoRuntime::uncommon_trap_Type() {
549 // create input type (domain)
550 const Type **fields = TypeTuple::fields(1);
551 fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
552 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
553
554 // create result type (range)
555 fields = TypeTuple::fields(0);
556 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
557
558 return TypeFunc::make(domain, range);
559 }
560
561 //-----------------------------------------------------------------------------
562 // Monitor Handling
563 const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
564 // create input type (domain)
565 const Type **fields = TypeTuple::fields(2);
566 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
567 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
568 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
569
570 // create result type (range)
571 fields = TypeTuple::fields(0);
572
573 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
574
575 return TypeFunc::make(domain,range);
576 }
577
578
579 //-----------------------------------------------------------------------------
580 const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
581 // create input type (domain)
582 const Type **fields = TypeTuple::fields(3);
583 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
584 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
585 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
586 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
587
588 // create result type (range)
589 fields = TypeTuple::fields(0);
590
591 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
592
593 return TypeFunc::make(domain, range);
594 }
595
596 const TypeFunc *OptoRuntime::monitor_notify_Type() {
597 // create input type (domain)
598 const Type **fields = TypeTuple::fields(1);
599 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be notified
600 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
601
602 // create result type (range)
603 fields = TypeTuple::fields(0);
604 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
605 return TypeFunc::make(domain, range);
606 }
607
608 const TypeFunc* OptoRuntime::flush_windows_Type() {
609 // create input type (domain)
610 const Type** fields = TypeTuple::fields(1);
611 fields[TypeFunc::Parms+0] = nullptr; // void
612 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
613
614 // create result type
615 fields = TypeTuple::fields(1);
616 fields[TypeFunc::Parms+0] = nullptr; // void
617 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
618
619 return TypeFunc::make(domain, range);
620 }
621
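// Note on 64-bit values in these signatures: a long or double occupies two adjacent tuple
// slots, the value type (TypeLong::LONG or Type::DOUBLE) followed by the Type::HALF
// placeholder, mirroring the JVM's two-slot representation.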
622 const TypeFunc* OptoRuntime::l2f_Type() {
623 // create input type (domain)
624 const Type **fields = TypeTuple::fields(2);
625 fields[TypeFunc::Parms+0] = TypeLong::LONG;
626 fields[TypeFunc::Parms+1] = Type::HALF;
627 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
628
629 // create result type (range)
630 fields = TypeTuple::fields(1);
631 fields[TypeFunc::Parms+0] = Type::FLOAT;
632 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
633
634 return TypeFunc::make(domain, range);
635 }
636
637 const TypeFunc* OptoRuntime::modf_Type() {
638 const Type **fields = TypeTuple::fields(2);
639 fields[TypeFunc::Parms+0] = Type::FLOAT;
640 fields[TypeFunc::Parms+1] = Type::FLOAT;
641 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
642
643 // create result type (range)
644 fields = TypeTuple::fields(1);
645 fields[TypeFunc::Parms+0] = Type::FLOAT;
646
647 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
648
649 return TypeFunc::make(domain, range);
650 }
651
652 const TypeFunc *OptoRuntime::Math_D_D_Type() {
653 // create input type (domain)
654 const Type **fields = TypeTuple::fields(2);
655 // double argument (value plus HALF placeholder)
656 fields[TypeFunc::Parms+0] = Type::DOUBLE;
657 fields[TypeFunc::Parms+1] = Type::HALF;
658 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
659
660 // create result type (range)
661 fields = TypeTuple::fields(2);
662 fields[TypeFunc::Parms+0] = Type::DOUBLE;
663 fields[TypeFunc::Parms+1] = Type::HALF;
664 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
665
666 return TypeFunc::make(domain, range);
667 }
668
669 const TypeFunc *OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) {
670 // create input type (domain)
671 const Type **fields = TypeTuple::fields(num_arg);
672 // all arguments are vectors of the given input type
673 assert(num_arg > 0, "must have at least 1 input");
674 for (uint i = 0; i < num_arg; i++) {
675 fields[TypeFunc::Parms+i] = in_type;
676 }
677 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields);
678
679 // create result type (range)
680 const uint num_ret = 1;
681 fields = TypeTuple::fields(num_ret);
682 fields[TypeFunc::Parms+0] = out_type;
683 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields);
684
685 return TypeFunc::make(domain, range);
686 }
687
688 const TypeFunc* OptoRuntime::Math_DD_D_Type() {
689 const Type **fields = TypeTuple::fields(4);
690 fields[TypeFunc::Parms+0] = Type::DOUBLE;
691 fields[TypeFunc::Parms+1] = Type::HALF;
692 fields[TypeFunc::Parms+2] = Type::DOUBLE;
693 fields[TypeFunc::Parms+3] = Type::HALF;
694 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);
695
696 // create result type (range)
697 fields = TypeTuple::fields(2);
698 fields[TypeFunc::Parms+0] = Type::DOUBLE;
699 fields[TypeFunc::Parms+1] = Type::HALF;
700 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
701
702 return TypeFunc::make(domain, range);
703 }
704
705 //-------------- currentTimeMillis, currentTimeNanos, etc
706
707 const TypeFunc* OptoRuntime::void_long_Type() {
708 // create input type (domain)
709 const Type **fields = TypeTuple::fields(0);
710 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
711
712 // create result type (range)
713 fields = TypeTuple::fields(2);
714 fields[TypeFunc::Parms+0] = TypeLong::LONG;
715 fields[TypeFunc::Parms+1] = Type::HALF;
716 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
717
718 return TypeFunc::make(domain, range);
719 }
720
721 // arraycopy stub variations:
722 enum ArrayCopyType {
723 ac_fast, // void(ptr, ptr, size_t)
724 ac_checkcast, // int(ptr, ptr, size_t, size_t, ptr)
725 ac_slow, // void(ptr, int, ptr, int, int)
726 ac_generic // int(ptr, int, ptr, int, int)
727 };
728
729 static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
730 // create input type (domain)
731 int num_args = (act == ac_fast ? 3 : 5);
732 int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
733 int argcnt = num_args;
734 LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
735 const Type** fields = TypeTuple::fields(argcnt);
736 int argp = TypeFunc::Parms;
737 fields[argp++] = TypePtr::NOTNULL; // src
738 if (num_size_args == 0) {
739 fields[argp++] = TypeInt::INT; // src_pos
740 }
741 fields[argp++] = TypePtr::NOTNULL; // dest
742 if (num_size_args == 0) {
743 fields[argp++] = TypeInt::INT; // dest_pos
744 fields[argp++] = TypeInt::INT; // length
745 }
746 while (num_size_args-- > 0) {
747 fields[argp++] = TypeX_X; // size in whatevers (size_t)
748 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
749 }
750 if (act == ac_checkcast) {
751 fields[argp++] = TypePtr::NOTNULL; // super_klass
752 }
753 assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
754 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
755
756 // create result type if needed
757 int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
758 fields = TypeTuple::fields(1);
759 if (retcnt == 0)
760 fields[TypeFunc::Parms+0] = nullptr; // void
761 else
762 fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
763 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
764 return TypeFunc::make(domain, range);
765 }
766
767 const TypeFunc* OptoRuntime::fast_arraycopy_Type() {
768 // This signature is simple: Two base pointers and a size_t.
769 return make_arraycopy_Type(ac_fast);
770 }
771
772 const TypeFunc* OptoRuntime::checkcast_arraycopy_Type() {
773 // An extension of fast_arraycopy_Type which adds type checking.
774 return make_arraycopy_Type(ac_checkcast);
775 }
776
777 const TypeFunc* OptoRuntime::slow_arraycopy_Type() {
778 // This signature is exactly the same as System.arraycopy.
779 // There are no intptr_t (int/long) arguments.
780 return make_arraycopy_Type(ac_slow);
781 }
782
783 const TypeFunc* OptoRuntime::generic_arraycopy_Type() {
784 // This signature is like System.arraycopy, except that it returns status.
785 return make_arraycopy_Type(ac_generic);
786 }
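// Of the variants above, only slow_arraycopy_Type() is bound to a C++ entry in generate()
// (SharedRuntime::slow_arraycopy_C); the fast/checkcast/generic signatures are presumably
// consumed when calling the platform arraycopy stubs in StubRoutines.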
787
788
789 const TypeFunc* OptoRuntime::array_fill_Type() {
790 const Type** fields;
791 int argp = TypeFunc::Parms;
792 // create input type (domain): pointer, int, size_t
793 fields = TypeTuple::fields(3 LP64_ONLY( + 1));
794 fields[argp++] = TypePtr::NOTNULL;
795 fields[argp++] = TypeInt::INT;
796 fields[argp++] = TypeX_X; // size in whatevers (size_t)
797 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
798 const TypeTuple *domain = TypeTuple::make(argp, fields);
799
800 // create result type
801 fields = TypeTuple::fields(1);
802 fields[TypeFunc::Parms+0] = nullptr; // void
803 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
804
805 return TypeFunc::make(domain, range);
806 }
807
808 // for aescrypt encrypt/decrypt operations, just three pointers returning void (length is constant)
809 const TypeFunc* OptoRuntime::aescrypt_block_Type() {
810 // create input type (domain)
811 int num_args = 3;
812 int argcnt = num_args;
813 const Type** fields = TypeTuple::fields(argcnt);
814 int argp = TypeFunc::Parms;
815 fields[argp++] = TypePtr::NOTNULL; // src
816 fields[argp++] = TypePtr::NOTNULL; // dest
817 fields[argp++] = TypePtr::NOTNULL; // k array
818 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
819 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
820
821 // no result type needed
822 fields = TypeTuple::fields(1);
823 fields[TypeFunc::Parms+0] = nullptr; // void
824 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
825 return TypeFunc::make(domain, range);
826 }
827
828 /**
829 * int updateBytesCRC32(int crc, byte* b, int len)
830 */
831 const TypeFunc* OptoRuntime::updateBytesCRC32_Type() {
832 // create input type (domain)
833 int num_args = 3;
834 int argcnt = num_args;
835 const Type** fields = TypeTuple::fields(argcnt);
836 int argp = TypeFunc::Parms;
837 fields[argp++] = TypeInt::INT; // crc
838 fields[argp++] = TypePtr::NOTNULL; // src
839 fields[argp++] = TypeInt::INT; // len
840 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
841 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
842
843 // result type needed
844 fields = TypeTuple::fields(1);
845 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
846 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
847 return TypeFunc::make(domain, range);
848 }
849
850 /**
851 * int updateBytesCRC32C(int crc, byte* buf, int len, int* table)
852 */
853 const TypeFunc* OptoRuntime::updateBytesCRC32C_Type() {
854 // create input type (domain)
855 int num_args = 4;
856 int argcnt = num_args;
857 const Type** fields = TypeTuple::fields(argcnt);
858 int argp = TypeFunc::Parms;
859 fields[argp++] = TypeInt::INT; // crc
860 fields[argp++] = TypePtr::NOTNULL; // buf
861 fields[argp++] = TypeInt::INT; // len
862 fields[argp++] = TypePtr::NOTNULL; // table
863 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
864 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
865
866 // result type needed
867 fields = TypeTuple::fields(1);
868 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
869 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
870 return TypeFunc::make(domain, range);
871 }
872
873 /**
874 * int updateBytesAdler32(int adler, byte* b, int len)
875 */
876 const TypeFunc* OptoRuntime::updateBytesAdler32_Type() {
877 // create input type (domain)
878 int num_args = 3;
879 int argcnt = num_args;
880 const Type** fields = TypeTuple::fields(argcnt);
881 int argp = TypeFunc::Parms;
882 fields[argp++] = TypeInt::INT; // adler
883 fields[argp++] = TypePtr::NOTNULL; // src + offset
884 fields[argp++] = TypeInt::INT; // len
885 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
886 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
887
888 // result type needed
889 fields = TypeTuple::fields(1);
890 fields[TypeFunc::Parms+0] = TypeInt::INT; // adler result
891 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
892 return TypeFunc::make(domain, range);
893 }
894
895 // for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning int
896 const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
897 // create input type (domain)
898 int num_args = 5;
899 int argcnt = num_args;
900 const Type** fields = TypeTuple::fields(argcnt);
901 int argp = TypeFunc::Parms;
902 fields[argp++] = TypePtr::NOTNULL; // src
903 fields[argp++] = TypePtr::NOTNULL; // dest
904 fields[argp++] = TypePtr::NOTNULL; // k array
905 fields[argp++] = TypePtr::NOTNULL; // r array
906 fields[argp++] = TypeInt::INT; // src len
907 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
908 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
909
910 // returning cipher len (int)
911 fields = TypeTuple::fields(1);
912 fields[TypeFunc::Parms+0] = TypeInt::INT;
913 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
914 return TypeFunc::make(domain, range);
915 }
916
917 // for electronicCodeBook calls of aescrypt encrypt/decrypt, three pointers and a length, returning int
918 const TypeFunc* OptoRuntime::electronicCodeBook_aescrypt_Type() {
919 // create input type (domain)
920 int num_args = 4;
921 int argcnt = num_args;
922 const Type** fields = TypeTuple::fields(argcnt);
923 int argp = TypeFunc::Parms;
924 fields[argp++] = TypePtr::NOTNULL; // src
925 fields[argp++] = TypePtr::NOTNULL; // dest
926 fields[argp++] = TypePtr::NOTNULL; // k array
927 fields[argp++] = TypeInt::INT; // src len
928 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
929 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
930
931 // returning cipher len (int)
932 fields = TypeTuple::fields(1);
933 fields[TypeFunc::Parms + 0] = TypeInt::INT;
934 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
935 return TypeFunc::make(domain, range);
936 }
937
938 // for counterMode calls of aescrypt encrypt/decrypt, six pointers and a length, returning int
939 const TypeFunc* OptoRuntime::counterMode_aescrypt_Type() {
940 // create input type (domain)
941 int num_args = 7;
942 int argcnt = num_args;
943 const Type** fields = TypeTuple::fields(argcnt);
944 int argp = TypeFunc::Parms;
945 fields[argp++] = TypePtr::NOTNULL; // src
946 fields[argp++] = TypePtr::NOTNULL; // dest
947 fields[argp++] = TypePtr::NOTNULL; // k array
948 fields[argp++] = TypePtr::NOTNULL; // counter array
949 fields[argp++] = TypeInt::INT; // src len
950 fields[argp++] = TypePtr::NOTNULL; // saved_encCounter
951 fields[argp++] = TypePtr::NOTNULL; // saved used addr
952 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
953 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
954 // returning cipher len (int)
955 fields = TypeTuple::fields(1);
956 fields[TypeFunc::Parms + 0] = TypeInt::INT;
957 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
958 return TypeFunc::make(domain, range);
959 }
960
961 /*
962 * void implCompress(byte[] buf, int ofs)
963 */
964 const TypeFunc* OptoRuntime::digestBase_implCompress_Type(bool is_sha3) {
965 // create input type (domain)
966 int num_args = is_sha3 ? 3 : 2;
967 int argcnt = num_args;
968 const Type** fields = TypeTuple::fields(argcnt);
969 int argp = TypeFunc::Parms;
970 fields[argp++] = TypePtr::NOTNULL; // buf
971 fields[argp++] = TypePtr::NOTNULL; // state
972 if (is_sha3) fields[argp++] = TypeInt::INT; // digest_length
973 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
974 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
975
976 // no result type needed
977 fields = TypeTuple::fields(1);
978 fields[TypeFunc::Parms+0] = nullptr; // void
979 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
980 return TypeFunc::make(domain, range);
981 }
982
983 /*
984 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
985 */
986 const TypeFunc* OptoRuntime::digestBase_implCompressMB_Type(bool is_sha3) {
987 // create input type (domain)
988 int num_args = is_sha3 ? 5 : 4;
989 int argcnt = num_args;
990 const Type** fields = TypeTuple::fields(argcnt);
991 int argp = TypeFunc::Parms;
992 fields[argp++] = TypePtr::NOTNULL; // buf
993 fields[argp++] = TypePtr::NOTNULL; // state
994 if (is_sha3) fields[argp++] = TypeInt::INT; // digest_length
995 fields[argp++] = TypeInt::INT; // ofs
996 fields[argp++] = TypeInt::INT; // limit
997 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
998 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
999
1000 // returning ofs (int)
1001 fields = TypeTuple::fields(1);
1002 fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
1003 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1004 return TypeFunc::make(domain, range);
1005 }
1006
1007 const TypeFunc* OptoRuntime::multiplyToLen_Type() {
1008 // create input type (domain)
1009 int num_args = 6;
1010 int argcnt = num_args;
1011 const Type** fields = TypeTuple::fields(argcnt);
1012 int argp = TypeFunc::Parms;
1013 fields[argp++] = TypePtr::NOTNULL; // x
1014 fields[argp++] = TypeInt::INT; // xlen
1015 fields[argp++] = TypePtr::NOTNULL; // y
1016 fields[argp++] = TypeInt::INT; // ylen
1017 fields[argp++] = TypePtr::NOTNULL; // z
1018 fields[argp++] = TypeInt::INT; // zlen
1019 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1020 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1021
1022 // no result type needed
1023 fields = TypeTuple::fields(1);
1024 fields[TypeFunc::Parms+0] = nullptr;
1025 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1026 return TypeFunc::make(domain, range);
1027 }
1028
1029 const TypeFunc* OptoRuntime::squareToLen_Type() {
1030 // create input type (domain)
1031 int num_args = 4;
1032 int argcnt = num_args;
1033 const Type** fields = TypeTuple::fields(argcnt);
1034 int argp = TypeFunc::Parms;
1035 fields[argp++] = TypePtr::NOTNULL; // x
1036 fields[argp++] = TypeInt::INT; // len
1037 fields[argp++] = TypePtr::NOTNULL; // z
1038 fields[argp++] = TypeInt::INT; // zlen
1039 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1040 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1041
1042 // no result type needed
1043 fields = TypeTuple::fields(1);
1044 fields[TypeFunc::Parms+0] = nullptr;
1045 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1046 return TypeFunc::make(domain, range);
1047 }
1048
1049 // for mulAdd calls, 2 pointers and 3 ints, returning int
1050 const TypeFunc* OptoRuntime::mulAdd_Type() {
1051 // create input type (domain)
1052 int num_args = 5;
1053 int argcnt = num_args;
1054 const Type** fields = TypeTuple::fields(argcnt);
1055 int argp = TypeFunc::Parms;
1056 fields[argp++] = TypePtr::NOTNULL; // out
1057 fields[argp++] = TypePtr::NOTNULL; // in
1058 fields[argp++] = TypeInt::INT; // offset
1059 fields[argp++] = TypeInt::INT; // len
1060 fields[argp++] = TypeInt::INT; // k
1061 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1062 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1063
1064 // returning carry (int)
1065 fields = TypeTuple::fields(1);
1066 fields[TypeFunc::Parms+0] = TypeInt::INT;
1067 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1068 return TypeFunc::make(domain, range);
1069 }
1070
1071 const TypeFunc* OptoRuntime::montgomeryMultiply_Type() {
1072 // create input type (domain)
1073 int num_args = 7;
1074 int argcnt = num_args;
1075 const Type** fields = TypeTuple::fields(argcnt);
1076 int argp = TypeFunc::Parms;
1077 fields[argp++] = TypePtr::NOTNULL; // a
1078 fields[argp++] = TypePtr::NOTNULL; // b
1079 fields[argp++] = TypePtr::NOTNULL; // n
1080 fields[argp++] = TypeInt::INT; // len
1081 fields[argp++] = TypeLong::LONG; // inv
1082 fields[argp++] = Type::HALF;
1083 fields[argp++] = TypePtr::NOTNULL; // result
1084 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1085 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1086
1087 // result type needed
1088 fields = TypeTuple::fields(1);
1089 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
1090
1091 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1092 return TypeFunc::make(domain, range);
1093 }
1094
1095 const TypeFunc* OptoRuntime::montgomerySquare_Type() {
1096 // create input type (domain)
1097 int num_args = 6;
1098 int argcnt = num_args;
1099 const Type** fields = TypeTuple::fields(argcnt);
1100 int argp = TypeFunc::Parms;
1101 fields[argp++] = TypePtr::NOTNULL; // a
1102 fields[argp++] = TypePtr::NOTNULL; // n
1103 fields[argp++] = TypeInt::INT; // len
1104 fields[argp++] = TypeLong::LONG; // inv
1105 fields[argp++] = Type::HALF;
1106 fields[argp++] = TypePtr::NOTNULL; // result
1107 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1108 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1109
1110 // result type needed
1111 fields = TypeTuple::fields(1);
1112 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
1113
1114 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1115 return TypeFunc::make(domain, range);
1116 }
1117
1118 const TypeFunc * OptoRuntime::bigIntegerShift_Type() {
1119 int argcnt = 5;
1120 const Type** fields = TypeTuple::fields(argcnt);
1121 int argp = TypeFunc::Parms;
1122 fields[argp++] = TypePtr::NOTNULL; // newArr
1123 fields[argp++] = TypePtr::NOTNULL; // oldArr
1124 fields[argp++] = TypeInt::INT; // newIdx
1125 fields[argp++] = TypeInt::INT; // shiftCount
1126 fields[argp++] = TypeInt::INT; // numIter
1127 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1128 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1129
1130 // no result type needed
1131 fields = TypeTuple::fields(1);
1132 fields[TypeFunc::Parms + 0] = nullptr;
1133 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1134 return TypeFunc::make(domain, range);
1135 }
1136
1137 const TypeFunc* OptoRuntime::vectorizedMismatch_Type() {
1138 // create input type (domain)
1139 int num_args = 4;
1140 int argcnt = num_args;
1141 const Type** fields = TypeTuple::fields(argcnt);
1142 int argp = TypeFunc::Parms;
1143 fields[argp++] = TypePtr::NOTNULL; // obja
1144 fields[argp++] = TypePtr::NOTNULL; // objb
1145 fields[argp++] = TypeInt::INT; // length, number of elements
1146 fields[argp++] = TypeInt::INT; // log2scale, element size
1147 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1148 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1149
1150 // return mismatch index (int)
1151 fields = TypeTuple::fields(1);
1152 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1153 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1154 return TypeFunc::make(domain, range);
1155 }
1156
1157 // GHASH block processing
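// ('state' and 'subkeyH' are assumed to be the two-element long arrays used by GHASH, 'data'
// the input byte buffer, and 'blocks' the number of 16-byte blocks to process.)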
1158 const TypeFunc* OptoRuntime::ghash_processBlocks_Type() {
1159 int argcnt = 4;
1160
1161 const Type** fields = TypeTuple::fields(argcnt);
1162 int argp = TypeFunc::Parms;
1163 fields[argp++] = TypePtr::NOTNULL; // state
1164 fields[argp++] = TypePtr::NOTNULL; // subkeyH
1165 fields[argp++] = TypePtr::NOTNULL; // data
1166 fields[argp++] = TypeInt::INT; // blocks
1167 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1168 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1169
1170 // no result type needed (void)
1171 fields = TypeTuple::fields(1);
1172 fields[TypeFunc::Parms+0] = nullptr; // void
1173 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1174 return TypeFunc::make(domain, range);
1175 }
1176 // Base64 encode function
1177 const TypeFunc* OptoRuntime::base64_encodeBlock_Type() {
1178 int argcnt = 6;
1179
1180 const Type** fields = TypeTuple::fields(argcnt);
1181 int argp = TypeFunc::Parms;
1182 fields[argp++] = TypePtr::NOTNULL; // src array
1183 fields[argp++] = TypeInt::INT; // offset
1184 fields[argp++] = TypeInt::INT; // length
1185 fields[argp++] = TypePtr::NOTNULL; // dest array
1186 fields[argp++] = TypeInt::INT; // dp
1187 fields[argp++] = TypeInt::BOOL; // isURL
1188 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1189 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1190
1191 // no result type needed (void)
1192 fields = TypeTuple::fields(1);
1193 fields[TypeFunc::Parms + 0] = nullptr; // void
1194 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1195 return TypeFunc::make(domain, range);
1196 }
1197 // Base64 decode function
1198 const TypeFunc* OptoRuntime::base64_decodeBlock_Type() {
1199 int argcnt = 7;
1200
1201 const Type** fields = TypeTuple::fields(argcnt);
1202 int argp = TypeFunc::Parms;
1203 fields[argp++] = TypePtr::NOTNULL; // src array
1204 fields[argp++] = TypeInt::INT; // src offset
1205 fields[argp++] = TypeInt::INT; // src length
1206 fields[argp++] = TypePtr::NOTNULL; // dest array
1207 fields[argp++] = TypeInt::INT; // dest offset
1208 fields[argp++] = TypeInt::BOOL; // isURL
1209 fields[argp++] = TypeInt::BOOL; // isMIME
1210 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1211 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1212
1213 // result type needed
1214 fields = TypeTuple::fields(1);
1215 fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst
1216 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1217 return TypeFunc::make(domain, range);
1218 }
1219
1220 //------------- Interpreter state access for on stack replacement
1221 const TypeFunc* OptoRuntime::osr_end_Type() {
1222 // create input type (domain)
1223 const Type **fields = TypeTuple::fields(1);
1224 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
1225 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
1226
1227 // create result type
1228 fields = TypeTuple::fields(1);
1229 // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
1230 fields[TypeFunc::Parms+0] = nullptr; // void
1231 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
1232 return TypeFunc::make(domain, range);
1233 }
1234
1235 //-------------------------------------------------------------------------------------
1236 // register policy
1237
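// register_save_policy[] comes from the ADLC-generated tables for the platform .ad file;
// the letters are 'C' = save-on-call (caller-saved), 'E' = save-on-entry (callee-saved),
// 'N' = no-save, 'A' = always-save.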
1238 bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
1239 assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
1240 switch (register_save_policy[reg]) {
1241 case 'C': return false; //SOC
1242 case 'E': return true ; //SOE
1243 case 'N': return false; //NS
1244 case 'A': return false; //AS
1245 }
1246 ShouldNotReachHere();
1247 return false;
1248 }
1249
1250 //-----------------------------------------------------------------------
1251 // Exceptions
1252 //
1253
1254 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);
1255
1256 // This method is an entry point that is always called from a C++ method, not
1257 // directly from compiled code. Compiled code calls the C++ method handle_exception_C below.
1258 // We must not allow an async exception to be installed while this exception is being processed.
1259 JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
1260 // Do not confuse exception_oop with pending_exception. The exception_oop
1261 // is only used to pass arguments into this method; it is not meant for general
1262 // exception handling. DO NOT CHANGE IT to use pending_exception, since
1263 // the runtime stubs check this on exit.
1264 assert(current->exception_oop() != nullptr, "exception oop is found");
1265 address handler_address = nullptr;
1266
1267 Handle exception(current, current->exception_oop());
1268 address pc = current->exception_pc();
1269
1270 // Clear out the exception oop and pc since looking up an
1271 // exception handler can cause class loading, which might throw an
1272 // exception and those fields are expected to be clear during
1273 // normal bytecode execution.
1274 current->clear_exception_oop_and_pc();
1275
1276 LogTarget(Info, exceptions) lt;
1277 if (lt.is_enabled()) {
1278 ResourceMark rm;
1279 LogStream ls(lt);
1280 trace_exception(&ls, exception(), pc, "");
1281 }
1282
1283 // for AbortVMOnException flag
1284 Exceptions::debug_check_abort(exception);
1285
1286 #ifdef ASSERT
1287 if (!(exception->is_a(vmClasses::Throwable_klass()))) {
1288 // should throw an exception here
1289 ShouldNotReachHere();
1290 }
1291 #endif
1292
1293 // new exception handling: this method is entered only from adapters
1294 // exceptions from compiled java methods are handled in compiled code
1295 // using rethrow node
1296
1297 nm = CodeCache::find_nmethod(pc);
1298 assert(nm != nullptr, "No NMethod found");
1299 if (nm->is_native_method()) {
1300 fatal("Native method should not have path to exception handling");
1301 } else {
1302 // we are switching to the old paradigm: search for the exception handler in caller_frame
1303 // instead of in the exception handler of caller_frame.sender()
1304
1305 if (JvmtiExport::can_post_on_exceptions()) {
1306 // "Full-speed catching" is not necessary here,
1307 // since we're notifying the VM on every catch.
1308 // Force deoptimization and the rest of the lookup
1309 // will be fine.
1310 deoptimize_caller_frame(current);
1311 }
1312
1313 // Check the stack guard pages. If enabled, look for handler in this frame;
1314 // otherwise, forcibly unwind the frame.
1315 //
1316 // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
1317 bool force_unwind = !current->stack_overflow_state()->reguard_stack();
1318 bool deopting = false;
1319 if (nm->is_deopt_pc(pc)) {
1320 deopting = true;
1321 RegisterMap map(current, false);
1322 frame deoptee = current->last_frame().sender(&map);
1323 assert(deoptee.is_deoptimized_frame(), "must be deopted");
1324 // Adjust the pc back to the original throwing pc
1325 pc = deoptee.pc();
1326 }
1327
1328 // If we are forcing an unwind because of stack overflow then deopt is
1329 // irrelevant since we are throwing the frame away anyway.
1330
1331 if (deopting && !force_unwind) {
1332 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
1333 } else {
1334
1335 handler_address =
1336 force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);
1337
1338 if (handler_address == nullptr) {
1339 bool recursive_exception = false;
1340 handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
1341 assert (handler_address != nullptr, "must have compiled handler");
1342 // Update the exception cache only when the unwind was not forced
1343 // and there didn't happen another exception during the computation of the
1344 // compiled exception handler. Checking for exception oop equality is not
1345 // sufficient because some exceptions are pre-allocated and reused.
1346 if (!force_unwind && !recursive_exception) {
1347 nm->add_handler_for_exception_and_pc(exception,pc,handler_address);
1348 }
1349 } else {
1350 #ifdef ASSERT
1351 bool recursive_exception = false;
1352 address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
1353 vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
1354 p2i(handler_address), p2i(computed_address));
1355 #endif
1356 }
1357 }
1358
1359 current->set_exception_pc(pc);
1360 current->set_exception_handler_pc(handler_address);
1361
1362 // Check if the exception PC is a MethodHandle call site.
1363 current->set_is_method_handle_return(nm->is_method_handle_return(pc));
1364 }
1365
1366 // Restore the exception oop that was saved in a Handle above.
1367 current->set_exception_oop(exception());
1368 return handler_address;
1369
1370 JRT_END
1371
1372 // We are entering here from exception_blob
1373 // If there is a compiled exception handler in this method, we will continue there;
1374 // otherwise we will unwind the stack and continue at the caller of top frame method
1375 // Note we enter without the usual JRT wrapper. We will call a helper routine that
1376 // will do the normal VM entry. We do it this way so that we can see if the nmethod
1377 // we looked up the handler for has been deoptimized in the meantime. If it has been
1378 // we must not use the handler and instead return the deopt blob.
1379 address OptoRuntime::handle_exception_C(JavaThread* current) {
1380 //
1381 // We are in Java (not in the VM) and in debug mode we have a NoHandleMark
1382 //
1383 #ifndef PRODUCT
1384 SharedRuntime::_find_handler_ctr++; // find exception handler
1385 #endif
1386 debug_only(NoHandleMark __hm;)
1387 nmethod* nm = nullptr;
1388 address handler_address = nullptr;
1389 {
1390 // Enter the VM
1391
1392 ResetNoHandleMark rnhm;
1393 handler_address = handle_exception_C_helper(current, nm);
1394 }
1395
1396 // Back in java: Use no oops, DON'T safepoint
1397
1398 // Now check to see if the handler we are returning is in a now
1399 // deoptimized frame
1400
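// If the caller frame was deoptimized while we were inside the VM above, the
// handler address we computed points into dead code; redirect to the deopt
// blob's exception-unpack entry instead.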
1401 if (nm != nullptr) {
1402 RegisterMap map(current, false);
1403 frame caller = current->last_frame().sender(&map);
1404 #ifdef ASSERT
1405 assert(caller.is_compiled_frame(), "must be");
1406 #endif // ASSERT
1407 if (caller.is_deoptimized_frame()) {
1408 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
1409 }
1410 }
1411 return handler_address;
1412 }
1413
1414 //------------------------------rethrow----------------------------------------
1415 // We get here after compiled code has executed a 'RethrowNode'. The callee
1416 // is either throwing or rethrowing an exception. The callee-save registers
1417 // have been restored, synchronized objects have been unlocked and the callee
1418 // stack frame has been removed. The return address was passed in.
1419 // Exception oop is passed as the 1st argument. This routine is then called
1420 // from the stub. On exit, we know where to jump in the caller's code.
1421 // After this C code exits, the stub will pop its frame and end in a jump
1422 // (instead of a return). We enter the caller's default handler.
1423 //
1424 // This must be JRT_LEAF:
1425 // - caller will not change its state as we cannot block on exit,
1426 // therefore raw_exception_handler_for_return_address is all it takes
1427 // to handle deoptimized blobs
1428 //
1429 // However, there needs to be a safepoint check in the middle! So compiled
1430 // safepoints are completely watertight.
1431 //
1432 // Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
1433 //
1434 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
1435 //
1436 address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
1437 // The frame we rethrow the exception to might not have been processed by the GC yet.
1438 // The stack watermark barrier takes care of detecting that and ensuring the frame
1439 // has updated oops.
1440 StackWatermarkSet::after_unwind(thread);
1441
1442 #ifndef PRODUCT
1443 SharedRuntime::_rethrow_ctr++; // count rethrows
1444 #endif
1445 assert (exception != nullptr, "should have thrown a NullPointerException");
1446 #ifdef ASSERT
1447 if (!(exception->is_a(vmClasses::Throwable_klass()))) {
1448 // should throw an exception here
1449 ShouldNotReachHere();
1450 }
1451 #endif
1452
1453 thread->set_vm_result(exception);
1454 // Frame not compiled (handles deoptimization blob)
1455 return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
1456 }
1457
1458
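// The *_Type() factories below all follow the same pattern: build a tuple of
// the incoming argument types (the "domain"), build a tuple of the returned
// types (the "range"), and combine them with TypeFunc::make. Slots are counted
// from TypeFunc::Parms. For rethrow the shape is roughly: oop rethrow(oop exception).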
1459 const TypeFunc *OptoRuntime::rethrow_Type() {
1460 // create input type (domain)
1461 const Type **fields = TypeTuple::fields(1);
1462 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
1463 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1464
1465 // create result type (range)
1466 fields = TypeTuple::fields(1);
1467 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
1468 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1469
1470 return TypeFunc::make(domain, range);
1471 }
1472
1473
1474 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
1475 // Deoptimize the caller before continuing, as the compiled
1476 // exception handler table may not be valid.
1477 if (!StressCompiledExceptionHandlers && doit) {
1478 deoptimize_caller_frame(thread);
1479 }
1480 }
1481
1482 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
1483 // Called from within the owner thread, so no need for safepoint
1484 RegisterMap reg_map(thread);
1485 frame stub_frame = thread->last_frame();
1486 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1487 frame caller_frame = stub_frame.sender(&reg_map);
1488
1489 // Deoptimize the caller frame.
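// Deoptimization::deoptimize_frame marks the identified frame so that it is
// converted to interpreter frames when execution resumes in it.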
1490 Deoptimization::deoptimize_frame(thread, caller_frame.id());
1491 }
1492
1493
1494 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
1495 // Called from within the owner thread, so no need for safepoint
1496 RegisterMap reg_map(thread);
1497 frame stub_frame = thread->last_frame();
1498 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
1499 frame caller_frame = stub_frame.sender(&reg_map);
1500 return caller_frame.is_deoptimized_frame();
1501 }
1502
1503
1504 const TypeFunc *OptoRuntime::register_finalizer_Type() {
1505 // create input type (domain)
1506 const Type **fields = TypeTuple::fields(1);
1507 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
1508 // // The JavaThread* is passed to each routine as the last argument
1509 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
1510 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
1511
1512 // create result type (range)
1513 fields = TypeTuple::fields(0);
1514
1515 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1516
1517 return TypeFunc::make(domain,range);
1518 }
1519
1520 #if INCLUDE_JFR
1521 const TypeFunc *OptoRuntime::get_class_id_intrinsic_Type() {
1522 // create input type (domain)
1523 const Type **fields = TypeTuple::fields(1);
1524 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
1525 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
1526
1527 // create result type (range)
1528 fields = TypeTuple::fields(0);
1529
1530 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
1531
1532 return TypeFunc::make(domain,range);
1533 }
1534 #endif
1535
1536 //-----------------------------------------------------------------------------
1537 // Dtrace support. entry and exit probes have the same signature
1538 const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() {
1539 // create input type (domain)
1540 const Type **fields = TypeTuple::fields(2);
1541 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1542 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
1543 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1544
1545 // create result type (range)
1546 fields = TypeTuple::fields(0);
1547
1548 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1549
1550 return TypeFunc::make(domain,range);
1551 }
1552
1553 const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
1554 // create input type (domain)
1555 const Type **fields = TypeTuple::fields(2);
1556 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
1557 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
1558
1559 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
1560
1561 // create result type (range)
1562 fields = TypeTuple::fields(0);
1563
1564 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1565
1566 return TypeFunc::make(domain,range);
1567 }
1568
1569
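// Reached from compiled code at the end of Object.<init> for objects whose
// class has a non-trivial finalize() method (the compiled code guards the
// call, hence the has_finalizer() assert below); registers the object with
// the finalization machinery.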
1570 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer(oopDesc* obj, JavaThread* current))
1571 assert(oopDesc::is_oop(obj), "must be a valid oop");
1572 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1573 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1574 JRT_END
1575
1576 //-----------------------------------------------------------------------------
1577
1578 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
1579
1580 //
1581 // dump the collected NamedCounters.
1582 //
1583 void OptoRuntime::print_named_counters() {
1584 int total_lock_count = 0;
1585 int eliminated_lock_count = 0;
1586
1587 NamedCounter* c = _named_counters;
1588 while (c) {
1589 if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
1590 int count = c->count();
1591 if (count > 0) {
1592 bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
1593 if (Verbose) {
1594 tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
1595 }
1596 total_lock_count += count;
1597 if (eliminated) {
1598 eliminated_lock_count += count;
1599 }
1600 }
1601 } else if (c->tag() == NamedCounter::BiasedLockingCounter) {
1602 BiasedLockingCounters* blc = ((BiasedLockingNamedCounter*)c)->counters();
1603 if (blc->nonzero()) {
1604 tty->print_cr("%s", c->name());
1605 blc->print_on(tty);
1606 }
1607 #if INCLUDE_RTM_OPT
1608 } else if (c->tag() == NamedCounter::RTMLockingCounter) {
1609 RTMLockingCounters* rlc = ((RTMLockingNamedCounter*)c)->counters();
1610 if (rlc->nonzero()) {
1611 tty->print_cr("%s", c->name());
1612 rlc->print_on(tty);
1613 }
1614 #endif
1615 }
1616 c = c->next();
1617 }
1618 if (total_lock_count > 0) {
1619 tty->print_cr("dynamic locks: %d", total_lock_count);
1620 if (eliminated_lock_count) {
1621 tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
1622 (int)(eliminated_lock_count * 100.0 / total_lock_count));
1623 }
1624 }
1625 }
1626
1627 //
1628 // Allocate a new NamedCounter. The JVMState is used to generate the
1629 // name, which consists of method@bci entries for the inlining tree.
1630 //
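// A generated name looks roughly like "Inlinee.foo@7 Caller.bar@42"
// (hypothetical example): youngest scope first, bci after the '@'.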
1631
1632 NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
1633 int max_depth = youngest_jvms->depth();
1634
1635 // Visit scopes from youngest to oldest.
1636 bool first = true;
1637 stringStream st;
1638 for (int depth = max_depth; depth >= 1; depth--) {
1639 JVMState* jvms = youngest_jvms->of_depth(depth);
1640 ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
1641 if (!first) {
1642 st.print(" ");
1643 } else {
1644 first = false;
1645 }
1646 int bci = jvms->bci();
1647 if (bci < 0) bci = 0;
1648 if (m != nullptr) {
1649 st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
1650 } else {
1651 st.print("no method");
1652 }
1653 st.print("@%d", bci);
1654 // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
1655 }
1656 NamedCounter* c;
1657 if (tag == NamedCounter::BiasedLockingCounter) {
1658 c = new BiasedLockingNamedCounter(st.as_string());
1659 } else if (tag == NamedCounter::RTMLockingCounter) {
1660 c = new RTMLockingNamedCounter(st.as_string());
1661 } else {
1662 c = new NamedCounter(st.as_string(), tag);
1663 }
1664
1665 // atomically add the new counter to the head of the list. We only
1666 // add counters so this is safe.
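// Lock-free prepend: point the new counter at the observed head, then CAS the
// list head from 'head' to 'c'. Atomic::cmpxchg returns the value it found,
// so any result other than 'head' means another thread won the race and we
// retry with a fresh head.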
1667 NamedCounter* head;
1668 do {
1669 c->set_next(nullptr);
1670 head = _named_counters;
1671 c->set_next(head);
1672 } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
1673 return c;
1674 }
1675
1676 int trace_exception_counter = 0;
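// Emits one human-readable line per traced exception on the given stream:
// a running counter, the tag passed in 'msg', the exception oop, the method
// or blob containing the throwing pc, and the raw pc.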
1677 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
1678 trace_exception_counter++;
1679 stringStream tempst;
1680
1681 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
1682 exception_oop->print_value_on(&tempst);
1683 tempst.print(" in ");
1684 CodeBlob* blob = CodeCache::find_blob(exception_pc);
1685 if (blob->is_compiled()) {
1686 CompiledMethod* cm = blob->as_compiled_method_or_null();
1687 cm->method()->print_value_on(&tempst);
1688 } else if (blob->is_runtime_stub()) {
1689 tempst.print("<runtime-stub>");
1690 } else {
1691 tempst.print("<unknown>");
1692 }
1693 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
1694 tempst.print("]");
1695
1696 st->print_raw_cr(tempst.as_string());
1697 }