1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/archiveBuilder.hpp"
26 #include "cds/archiveUtils.inline.hpp"
27 #include "classfile/classLoader.hpp"
28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/stringTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/aotCodeCache.hpp"
33 #include "code/codeCache.hpp"
34 #include "code/compiledIC.hpp"
35 #include "code/nmethod.inline.hpp"
36 #include "code/scopeDesc.hpp"
37 #include "code/vtableStubs.hpp"
38 #include "compiler/abstractCompiler.hpp"
39 #include "compiler/compileBroker.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/interpreterRuntime.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "jvm.h"
47 #include "logging/log.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "metaprogramming/primitiveConversions.hpp"
51 #include "oops/klass.hpp"
52 #include "oops/method.inline.hpp"
53 #include "oops/objArrayKlass.hpp"
54 #include "oops/oop.inline.hpp"
55 #include "prims/forte.hpp"
56 #include "prims/jvmtiExport.hpp"
57 #include "prims/jvmtiThreadState.hpp"
58 #include "prims/methodHandles.hpp"
59 #include "prims/nativeLookup.hpp"
60 #include "runtime/arguments.hpp"
61 #include "runtime/atomicAccess.hpp"
62 #include "runtime/basicLock.inline.hpp"
63 #include "runtime/frame.inline.hpp"
64 #include "runtime/handles.inline.hpp"
65 #include "runtime/init.hpp"
66 #include "runtime/interfaceSupport.inline.hpp"
67 #include "runtime/java.hpp"
68 #include "runtime/javaCalls.hpp"
69 #include "runtime/jniHandles.inline.hpp"
70 #include "runtime/osThread.hpp"
71 #include "runtime/perfData.hpp"
72 #include "runtime/sharedRuntime.hpp"
73 #include "runtime/stackWatermarkSet.hpp"
74 #include "runtime/stubRoutines.hpp"
75 #include "runtime/synchronizer.inline.hpp"
76 #include "runtime/timerTrace.hpp"
77 #include "runtime/vframe.inline.hpp"
78 #include "runtime/vframeArray.hpp"
79 #include "runtime/vm_version.hpp"
80 #include "utilities/copy.hpp"
81 #include "utilities/dtrace.hpp"
82 #include "utilities/events.hpp"
83 #include "utilities/globalDefinitions.hpp"
84 #include "utilities/hashTable.hpp"
85 #include "utilities/macros.hpp"
86 #include "utilities/xmlstream.hpp"
87 #ifdef COMPILER1
88 #include "c1/c1_Runtime1.hpp"
89 #endif
90 #ifdef COMPILER2
91 #include "opto/runtime.hpp"
92 #endif
93 #if INCLUDE_JFR
94 #include "jfr/jfr.inline.hpp"
95 #endif
96
97 // Shared runtime stub routines reside in their own unique blob with a
98 // single entry point
99
100
101 #define SHARED_STUB_FIELD_DEFINE(name, type) \
102 type* SharedRuntime::BLOB_FIELD_NAME(name);
103 SHARED_STUBS_DO(SHARED_STUB_FIELD_DEFINE)
104 #undef SHARED_STUB_FIELD_DEFINE
105
106 nmethod* SharedRuntime::_cont_doYield_stub;
107
108 #if 0
109 // TODO tweak global stub name generation to match this
110 #define SHARED_STUB_NAME_DECLARE(name, type) "Shared Runtime " # name "_blob",
111 const char *SharedRuntime::_stub_names[] = {
112 SHARED_STUBS_DO(SHARED_STUB_NAME_DECLARE)
113 };
114 #endif
115
116 //----------------------------generate_stubs-----------------------------------
117 void SharedRuntime::generate_initial_stubs() {
118 // Build this early so it's available for the interpreter.
119 _throw_StackOverflowError_blob =
120 generate_throw_exception(StubId::shared_throw_StackOverflowError_id,
121 CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
122 }
123
124 void SharedRuntime::generate_stubs() {
125 _wrong_method_blob =
126 generate_resolve_blob(StubId::shared_wrong_method_id,
127 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method));
128 _wrong_method_abstract_blob =
129 generate_resolve_blob(StubId::shared_wrong_method_abstract_id,
130 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract));
131 _ic_miss_blob =
132 generate_resolve_blob(StubId::shared_ic_miss_id,
133 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss));
134 _resolve_opt_virtual_call_blob =
135 generate_resolve_blob(StubId::shared_resolve_opt_virtual_call_id,
136 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C));
137 _resolve_virtual_call_blob =
138 generate_resolve_blob(StubId::shared_resolve_virtual_call_id,
139 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C));
140 _resolve_static_call_blob =
141 generate_resolve_blob(StubId::shared_resolve_static_call_id,
142 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C));
143
144 _throw_delayed_StackOverflowError_blob =
145 generate_throw_exception(StubId::shared_throw_delayed_StackOverflowError_id,
146 CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
147
148 _throw_AbstractMethodError_blob =
149 generate_throw_exception(StubId::shared_throw_AbstractMethodError_id,
150 CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
151
152 _throw_IncompatibleClassChangeError_blob =
153 generate_throw_exception(StubId::shared_throw_IncompatibleClassChangeError_id,
154 CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
155
156 _throw_NullPointerException_at_call_blob =
157 generate_throw_exception(StubId::shared_throw_NullPointerException_at_call_id,
158 CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
159
160 #if COMPILER2_OR_JVMCI
161 // Vectors are generated only by C2 and JVMCI.
162 bool support_wide = is_wide_vector(MaxVectorSize);
163 if (support_wide) {
164 _polling_page_vectors_safepoint_handler_blob =
165 generate_handler_blob(StubId::shared_polling_page_vectors_safepoint_handler_id,
166 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
167 }
168 #endif // COMPILER2_OR_JVMCI
169 _polling_page_safepoint_handler_blob =
170 generate_handler_blob(StubId::shared_polling_page_safepoint_handler_id,
171 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
172 _polling_page_return_handler_blob =
173 generate_handler_blob(StubId::shared_polling_page_return_handler_id,
174 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
175
176 generate_deopt_blob();
177 }
178
179 void SharedRuntime::init_adapter_library() {
180 AdapterHandlerLibrary::initialize();
181 }
182
183 #if INCLUDE_JFR
184 //------------------------------generate jfr runtime stubs ------
185 void SharedRuntime::generate_jfr_stubs() {
186 ResourceMark rm;
187 const char* timer_msg = "SharedRuntime generate_jfr_stubs";
188 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
189
190 _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint();
191 _jfr_return_lease_blob = generate_jfr_return_lease();
192 }
193
194 #endif // INCLUDE_JFR
195
196 #include <math.h>
197
198 // Implementation of SharedRuntime
199
200 #ifndef PRODUCT
201 // For statistics
202 uint SharedRuntime::_ic_miss_ctr = 0;
203 uint SharedRuntime::_wrong_method_ctr = 0;
204 uint SharedRuntime::_resolve_static_ctr = 0;
205 uint SharedRuntime::_resolve_virtual_ctr = 0;
206 uint SharedRuntime::_resolve_opt_virtual_ctr = 0;
207 uint SharedRuntime::_implicit_null_throws = 0;
208 uint SharedRuntime::_implicit_div0_throws = 0;
209
210 int64_t SharedRuntime::_nof_normal_calls = 0;
211 int64_t SharedRuntime::_nof_inlined_calls = 0;
212 int64_t SharedRuntime::_nof_megamorphic_calls = 0;
213 int64_t SharedRuntime::_nof_static_calls = 0;
214 int64_t SharedRuntime::_nof_inlined_static_calls = 0;
215 int64_t SharedRuntime::_nof_interface_calls = 0;
216 int64_t SharedRuntime::_nof_inlined_interface_calls = 0;
217
218 uint SharedRuntime::_new_instance_ctr=0;
219 uint SharedRuntime::_new_array_ctr=0;
220 uint SharedRuntime::_multi2_ctr=0;
221 uint SharedRuntime::_multi3_ctr=0;
222 uint SharedRuntime::_multi4_ctr=0;
223 uint SharedRuntime::_multi5_ctr=0;
224 uint SharedRuntime::_mon_enter_stub_ctr=0;
225 uint SharedRuntime::_mon_exit_stub_ctr=0;
226 uint SharedRuntime::_mon_enter_ctr=0;
227 uint SharedRuntime::_mon_exit_ctr=0;
228 uint SharedRuntime::_partial_subtype_ctr=0;
229 uint SharedRuntime::_jbyte_array_copy_ctr=0;
230 uint SharedRuntime::_jshort_array_copy_ctr=0;
231 uint SharedRuntime::_jint_array_copy_ctr=0;
232 uint SharedRuntime::_jlong_array_copy_ctr=0;
233 uint SharedRuntime::_oop_array_copy_ctr=0;
234 uint SharedRuntime::_checkcast_array_copy_ctr=0;
235 uint SharedRuntime::_unsafe_array_copy_ctr=0;
236 uint SharedRuntime::_generic_array_copy_ctr=0;
237 uint SharedRuntime::_slow_array_copy_ctr=0;
238 uint SharedRuntime::_find_handler_ctr=0;
239 uint SharedRuntime::_rethrow_ctr=0;
240 uint SharedRuntime::_unsafe_set_memory_ctr=0;
241
242 int SharedRuntime::_ICmiss_index = 0;
243 int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
244 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
245
246
247 void SharedRuntime::trace_ic_miss(address at) {
248 for (int i = 0; i < _ICmiss_index; i++) {
249 if (_ICmiss_at[i] == at) {
250 _ICmiss_count[i]++;
251 return;
252 }
253 }
254 int index = _ICmiss_index++;
255 if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
256 _ICmiss_at[index] = at;
257 _ICmiss_count[index] = 1;
258 }
259
260 void SharedRuntime::print_ic_miss_histogram() {
261 if (ICMissHistogram) {
262 tty->print_cr("IC Miss Histogram:");
263 int tot_misses = 0;
264 for (int i = 0; i < _ICmiss_index; i++) {
265 tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
266 tot_misses += _ICmiss_count[i];
267 }
268 tty->print_cr("Total IC misses: %7d", tot_misses);
269 }
270 }
271
272 #ifdef COMPILER2
273 // Runtime methods for printf-style debug nodes (same printing format as fieldDescriptor::print_on_for)
274 void SharedRuntime::debug_print_value(jboolean x) {
275 tty->print_cr("boolean %d", x);
276 }
277
278 void SharedRuntime::debug_print_value(jbyte x) {
279 tty->print_cr("byte %d", x);
280 }
281
282 void SharedRuntime::debug_print_value(jshort x) {
283 tty->print_cr("short %d", x);
284 }
285
286 void SharedRuntime::debug_print_value(jchar x) {
287 tty->print_cr("char %c %d", isprint(x) ? x : ' ', x);
288 }
289
290 void SharedRuntime::debug_print_value(jint x) {
291 tty->print_cr("int %d", x);
292 }
293
294 void SharedRuntime::debug_print_value(jlong x) {
295 tty->print_cr("long " JLONG_FORMAT, x);
296 }
297
298 void SharedRuntime::debug_print_value(jfloat x) {
299 tty->print_cr("float %f", x);
300 }
301
302 void SharedRuntime::debug_print_value(jdouble x) {
303 tty->print_cr("double %lf", x);
304 }
305
306 void SharedRuntime::debug_print_value(oopDesc* x) {
307 x->print();
308 }
309 #endif // COMPILER2
310
311 #endif // PRODUCT
312
313
314 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
315 return x * y;
316 JRT_END
317
318
319 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
320 if (x == min_jlong && y == CONST64(-1)) {
321 return x;
322 } else {
323 return x / y;
324 }
325 JRT_END
326
327
328 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
329 if (x == min_jlong && y == CONST64(-1)) {
330 return 0;
331 } else {
332 return x % y;
333 }
334 JRT_END
335
336
337 #ifdef _WIN64
338 const juint float_sign_mask = 0x7FFFFFFF;
339 const juint float_infinity = 0x7F800000;
340 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
341 const julong double_infinity = CONST64(0x7FF0000000000000);
342 #endif
343
344 #if !defined(X86)
345 JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
346 #ifdef _WIN64
347 // 64-bit Windows on amd64 returns the wrong values for
348 // infinity operands.
349 juint xbits = PrimitiveConversions::cast<juint>(x);
350 juint ybits = PrimitiveConversions::cast<juint>(y);
351 // x Mod Infinity == x unless x is infinity
352 if (((xbits & float_sign_mask) != float_infinity) &&
353 ((ybits & float_sign_mask) == float_infinity) ) {
354 return x;
355 }
356 return ((jfloat)fmod_winx64((double)x, (double)y));
357 #else
358 return ((jfloat)fmod((double)x,(double)y));
359 #endif
360 JRT_END
361
362 JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
363 #ifdef _WIN64
364 julong xbits = PrimitiveConversions::cast<julong>(x);
365 julong ybits = PrimitiveConversions::cast<julong>(y);
366 // x Mod Infinity == x unless x is infinity
367 if (((xbits & double_sign_mask) != double_infinity) &&
368 ((ybits & double_sign_mask) == double_infinity) ) {
369 return x;
370 }
371 return ((jdouble)fmod_winx64((double)x, (double)y));
372 #else
373 return ((jdouble)fmod((double)x,(double)y));
374 #endif
375 JRT_END
376 #endif // !X86
377
378 JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
379 return (jfloat)x;
380 JRT_END
381
382 #ifdef __SOFTFP__
383 JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
384 return x + y;
385 JRT_END
386
387 JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
388 return x - y;
389 JRT_END
390
391 JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
392 return x * y;
393 JRT_END
394
395 JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
396 return x / y;
397 JRT_END
398
399 JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
400 return x + y;
401 JRT_END
402
403 JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
404 return x - y;
405 JRT_END
406
407 JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
408 return x * y;
409 JRT_END
410
411 JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
412 return x / y;
413 JRT_END
414
415 JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
416 return (jdouble)x;
417 JRT_END
418
419 JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
420 return (jdouble)x;
421 JRT_END
422
423 JRT_LEAF(int, SharedRuntime::fcmpl(float x, float y))
424 return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan*/
425 JRT_END
426
427 JRT_LEAF(int, SharedRuntime::fcmpg(float x, float y))
428 return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
429 JRT_END
430
431 JRT_LEAF(int, SharedRuntime::dcmpl(double x, double y))
432 return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
433 JRT_END
434
435 JRT_LEAF(int, SharedRuntime::dcmpg(double x, double y))
436 return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
437 JRT_END
438
439 // Functions to return the opposite of the aeabi functions for nan.
440 JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
441 return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
442 JRT_END
443
444 JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
445 return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
446 JRT_END
447
448 JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
449 return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
450 JRT_END
451
452 JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
453 return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
454 JRT_END
455
456 JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
457 return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
458 JRT_END
459
460 JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
461 return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
462 JRT_END
463
464 JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
465 return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
466 JRT_END
467
468 JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
469 return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
470 JRT_END
471
472 // Intrinsics make gcc generate code for these.
473 float SharedRuntime::fneg(float f) {
474 return -f;
475 }
476
477 double SharedRuntime::dneg(double f) {
478 return -f;
479 }
480
481 #endif // __SOFTFP__
482
483 #if defined(__SOFTFP__) || defined(E500V2)
484 // Intrinsics make gcc generate code for these.
485 double SharedRuntime::dabs(double f) {
486 return (f <= (double)0.0) ? (double)0.0 - f : f;
487 }
488
489 #endif
490
491 #if defined(__SOFTFP__)
492 double SharedRuntime::dsqrt(double f) {
493 return sqrt(f);
494 }
495 #endif
496
497 JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
498 if (g_isnan(x))
499 return 0;
500 if (x >= (jfloat) max_jint)
501 return max_jint;
502 if (x <= (jfloat) min_jint)
503 return min_jint;
504 return (jint) x;
505 JRT_END
506
507
508 JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
509 if (g_isnan(x))
510 return 0;
511 if (x >= (jfloat) max_jlong)
512 return max_jlong;
513 if (x <= (jfloat) min_jlong)
514 return min_jlong;
515 return (jlong) x;
516 JRT_END
517
518
519 JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
520 if (g_isnan(x))
521 return 0;
522 if (x >= (jdouble) max_jint)
523 return max_jint;
524 if (x <= (jdouble) min_jint)
525 return min_jint;
526 return (jint) x;
527 JRT_END
528
529
530 JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
531 if (g_isnan(x))
532 return 0;
533 if (x >= (jdouble) max_jlong)
534 return max_jlong;
535 if (x <= (jdouble) min_jlong)
536 return min_jlong;
537 return (jlong) x;
538 JRT_END
539
540
541 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
542 return (jfloat)x;
543 JRT_END
544
545
546 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
547 return (jfloat)x;
548 JRT_END
549
550
551 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
552 return (jdouble)x;
553 JRT_END
554
555
556 // Exception handling across interpreter/compiler boundaries
557 //
558 // exception_handler_for_return_address(...) returns the continuation address.
559 // The continuation address is the entry point of the exception handler of the
560 // previous frame depending on the return address.
561
562 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
563 // Note: This is called when we have unwound the frame of the callee that did
564 // throw an exception. So far, no check has been performed by the StackWatermarkSet.
565 // Notably, the stack is not walkable at this point, and hence the check must
566 // be deferred until later. Specifically, any of the handlers returned here in
567 // this function, will get dispatched to, and call deferred checks to
568 // StackWatermarkSet::after_unwind at a point where the stack is walkable.
569 assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
570 assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
571
572 #if INCLUDE_JVMCI
573 // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
574 // and other exception handler continuations do not read it
575 current->set_exception_pc(nullptr);
576 #endif // INCLUDE_JVMCI
577
578 if (Continuation::is_return_barrier_entry(return_address)) {
579 return StubRoutines::cont_returnBarrierExc();
580 }
581
582 // The fastest case first
583 CodeBlob* blob = CodeCache::find_blob(return_address);
584 nmethod* nm = (blob != nullptr) ? blob->as_nmethod_or_null() : nullptr;
585 if (nm != nullptr) {
586 // native nmethods don't have exception handlers
587 assert(!nm->is_native_method() || nm->method()->is_continuation_enter_intrinsic(), "no exception handler");
588 assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
589 if (nm->is_deopt_pc(return_address)) {
590 // If we come here because of a stack overflow, the stack may be
591 // unguarded. Reguard the stack otherwise if we return to the
592 // deopt blob and the stack bang causes a stack overflow we
593 // crash.
594 StackOverflow* overflow_state = current->stack_overflow_state();
595 bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
596 if (overflow_state->reserved_stack_activation() != current->stack_base()) {
597 overflow_state->set_reserved_stack_activation(current->stack_base());
598 }
599 assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
600 // The deferred StackWatermarkSet::after_unwind check will be performed in
601 // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
602 return SharedRuntime::deopt_blob()->unpack_with_exception();
603 } else {
604 // The deferred StackWatermarkSet::after_unwind check will be performed in
605 // * OptoRuntime::handle_exception_C_helper for C2 code
606 // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
607 #ifdef COMPILER2
608 if (nm->compiler_type() == compiler_c2) {
609 return OptoRuntime::exception_blob()->entry_point();
610 }
611 #endif // COMPILER2
612 return nm->exception_begin();
613 }
614 }
615
616 // Entry code
617 if (StubRoutines::returns_to_call_stub(return_address)) {
618 // The deferred StackWatermarkSet::after_unwind check will be performed in
619 // JavaCallWrapper::~JavaCallWrapper
620 assert (StubRoutines::catch_exception_entry() != nullptr, "must be generated before");
621 return StubRoutines::catch_exception_entry();
622 }
623 if (blob != nullptr && blob->is_upcall_stub()) {
624 return StubRoutines::upcall_stub_exception_handler();
625 }
626 // Interpreted code
627 if (Interpreter::contains(return_address)) {
628 // The deferred StackWatermarkSet::after_unwind check will be performed in
629 // InterpreterRuntime::exception_handler_for_exception
630 return Interpreter::rethrow_exception_entry();
631 }
632
633 guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
634 guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");
635
636 #ifndef PRODUCT
637 { ResourceMark rm;
638 tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
639 os::print_location(tty, (intptr_t)return_address);
640 tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
641 tty->print_cr("b) other problem");
642 }
643 #endif // PRODUCT
644 ShouldNotReachHere();
645 return nullptr;
646 }
647
648
649 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* current, address return_address))
650 return raw_exception_handler_for_return_address(current, return_address);
651 JRT_END
652
653
654 address SharedRuntime::get_poll_stub(address pc) {
655 address stub;
656 // Look up the code blob
657 CodeBlob *cb = CodeCache::find_blob(pc);
658
659 // Should be an nmethod
660 guarantee(cb != nullptr && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");
661
662 // Look up the relocation information
663 assert(cb->as_nmethod()->is_at_poll_or_poll_return(pc),
664 "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));
665
666 #ifdef ASSERT
667 if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
668 tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
669 Disassembler::decode(cb);
670 fatal("Only polling locations are used for safepoint");
671 }
672 #endif
673
674 bool at_poll_return = cb->as_nmethod()->is_at_poll_return(pc);
675 bool has_wide_vectors = cb->as_nmethod()->has_wide_vectors();
676 if (at_poll_return) {
677 assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
678 "polling page return stub not created yet");
679 stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
680 } else if (has_wide_vectors) {
681 assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr,
682 "polling page vectors safepoint stub not created yet");
683 stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
684 } else {
685 assert(SharedRuntime::polling_page_safepoint_handler_blob() != nullptr,
686 "polling page safepoint stub not created yet");
687 stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
688 }
689 log_trace(safepoint)("Polling page exception: thread = " INTPTR_FORMAT " [%d], pc = "
690 INTPTR_FORMAT " (%s), stub = " INTPTR_FORMAT,
691 p2i(Thread::current()),
692 Thread::current()->osthread()->thread_id(),
693 p2i(pc),
694 at_poll_return ? "return" : "loop",
695 p2i(stub));
696 return stub;
697 }
698
699 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception) {
700 if (JvmtiExport::can_post_on_exceptions()) {
701 vframeStream vfst(current, true);
702 methodHandle method = methodHandle(current, vfst.method());
703 address bcp = method()->bcp_from(vfst.bci());
704 JvmtiExport::post_exception_throw(current, method(), bcp, h_exception());
705 }
706
707 #if INCLUDE_JVMCI
708 if (EnableJVMCI) {
709 vframeStream vfst(current, true);
710 methodHandle method = methodHandle(current, vfst.method());
711 int bci = vfst.bci();
712 MethodData* trap_mdo = method->method_data();
713 if (trap_mdo != nullptr) {
714 // Set exception_seen if the exceptional bytecode is an invoke
715 Bytecode_invoke call = Bytecode_invoke_check(method, bci);
716 if (call.is_valid()) {
717 ResourceMark rm(current);
718
719 // Lock to read ProfileData, and ensure lock is not broken by a safepoint
720 MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
721
722 ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr);
723 if (pdata != nullptr && pdata->is_BitData()) {
724 BitData* bit_data = (BitData*) pdata;
725 bit_data->set_exception_seen();
726 }
727 }
728 }
729 }
730 #endif
731
732 Exceptions::_throw(current, __FILE__, __LINE__, h_exception);
733 }
734
735 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message) {
736 Handle h_exception = Exceptions::new_exception(current, name, message);
737 throw_and_post_jvmti_exception(current, h_exception);
738 }
739
740 #if INCLUDE_JVMTI
741 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_start(oopDesc* vt, jboolean hide, JavaThread* current))
742 assert(hide == JNI_FALSE, "must be VTMS transition finish");
743 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
744 JvmtiVTMSTransitionDisabler::VTMS_vthread_start(vthread);
745 JNIHandles::destroy_local(vthread);
746 JRT_END
747
748 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_end(oopDesc* vt, jboolean hide, JavaThread* current))
749 assert(hide == JNI_TRUE, "must be VTMS transition start");
750 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
751 JvmtiVTMSTransitionDisabler::VTMS_vthread_end(vthread);
752 JNIHandles::destroy_local(vthread);
753 JRT_END
754
755 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_mount(oopDesc* vt, jboolean hide, JavaThread* current))
756 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
757 JvmtiVTMSTransitionDisabler::VTMS_vthread_mount(vthread, hide);
758 JNIHandles::destroy_local(vthread);
759 JRT_END
760
761 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_unmount(oopDesc* vt, jboolean hide, JavaThread* current))
762 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
763 JvmtiVTMSTransitionDisabler::VTMS_vthread_unmount(vthread, hide);
764 JNIHandles::destroy_local(vthread);
765 JRT_END
766 #endif // INCLUDE_JVMTI
767
768 // The interpreter code to call this tracing function is only
769 // called/generated when UL is on for redefine, class and has the right level
770 // and tags. Since obsolete methods are never compiled, we don't have
771 // to modify the compilers to generate calls to this function.
772 //
773 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
774 JavaThread* thread, Method* method))
775 if (method->is_obsolete()) {
776 // We are calling an obsolete method, but this is not necessarily
777 // an error. Our method could have been redefined just after we
778 // fetched the Method* from the constant pool.
779 ResourceMark rm;
780 log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
781 }
782 return 0;
783 JRT_END
784
785 // ret_pc points into caller; we are returning caller's exception handler
786 // for given exception
787 // Note that the implementation of this method assumes it's only called when an exception has actually occured
788 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
789 bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
790 assert(nm != nullptr, "must exist");
791 ResourceMark rm;
792
793 #if INCLUDE_JVMCI
794 if (nm->is_compiled_by_jvmci()) {
795 // lookup exception handler for this pc
796 int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
797 ExceptionHandlerTable table(nm);
798 HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
799 if (t != nullptr) {
800 return nm->code_begin() + t->pco();
801 } else {
802 bool make_not_entrant = true;
803 return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
804 }
805 }
806 #endif // INCLUDE_JVMCI
807
808 ScopeDesc* sd = nm->scope_desc_at(ret_pc);
809 // determine handler bci, if any
810 EXCEPTION_MARK;
811
812 int handler_bci = -1;
813 int scope_depth = 0;
814 if (!force_unwind) {
815 int bci = sd->bci();
816 bool recursive_exception = false;
817 do {
818 bool skip_scope_increment = false;
819 // exception handler lookup
820 Klass* ek = exception->klass();
821 methodHandle mh(THREAD, sd->method());
822 handler_bci = Method::fast_exception_handler_bci_for(mh, ek, bci, THREAD);
823 if (HAS_PENDING_EXCEPTION) {
824 recursive_exception = true;
825 // We threw an exception while trying to find the exception handler.
826 // Transfer the new exception to the exception handle which will
827 // be set into thread local storage, and do another lookup for an
828 // exception handler for this exception, this time starting at the
829 // BCI of the exception handler which caused the exception to be
830 // thrown (bugs 4307310 and 4546590). Set "exception" reference
831 // argument to ensure that the correct exception is thrown (4870175).
832 recursive_exception_occurred = true;
833 exception = Handle(THREAD, PENDING_EXCEPTION);
834 CLEAR_PENDING_EXCEPTION;
835 if (handler_bci >= 0) {
836 bci = handler_bci;
837 handler_bci = -1;
838 skip_scope_increment = true;
839 }
840 }
841 else {
842 recursive_exception = false;
843 }
844 if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
845 sd = sd->sender();
846 if (sd != nullptr) {
847 bci = sd->bci();
848 }
849 ++scope_depth;
850 }
851 } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != nullptr));
852 }
853
854 // found handling method => lookup exception handler
855 int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
856
857 ExceptionHandlerTable table(nm);
858 HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
859
860 // If the compiler did not anticipate a recursive exception, resulting in an exception
861 // thrown from the catch bci, then the compiled exception handler might be missing.
862 // This is rare. Just deoptimize and let the interpreter handle it.
863 if (t == nullptr && recursive_exception_occurred) {
864 bool make_not_entrant = false;
865 return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
866 }
867
868 if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) {
869 // Allow abbreviated catch tables. The idea is to allow a method
870 // to materialize its exceptions without committing to the exact
871 // routing of exceptions. In particular this is needed for adding
872 // a synthetic handler to unlock monitors when inlining
873 // synchronized methods since the unlock path isn't represented in
874 // the bytecodes.
875 t = table.entry_for(catch_pco, -1, 0);
876 }
877
878 #ifdef COMPILER1
879 if (t == nullptr && nm->is_compiled_by_c1()) {
880 assert(nm->unwind_handler_begin() != nullptr, "");
881 return nm->unwind_handler_begin();
882 }
883 #endif
884
885 if (t == nullptr) {
886 ttyLocker ttyl;
887 tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
888 tty->print_cr(" Exception:");
889 exception->print();
890 tty->cr();
891 tty->print_cr(" Compiled exception table :");
892 table.print();
893 nm->print();
894 nm->print_code();
895 guarantee(false, "missing exception handler");
896 return nullptr;
897 }
898
899 if (handler_bci != -1) { // did we find a handler in this method?
900 sd->method()->set_exception_handler_entered(handler_bci); // profile
901 }
902 return nm->code_begin() + t->pco();
903 }
904
905 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
906 // These errors occur only at call sites
907 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
908 JRT_END
909
910 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
911 // These errors occur only at call sites
912 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
913 JRT_END
914
915 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
916 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
917 JRT_END
918
919 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* current))
920 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
921 JRT_END
922
923 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current))
924 // This entry point is effectively only used for NullPointerExceptions which occur at inline
925 // cache sites (when the callee activation is not yet set up) so we are at a call site
926 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
927 JRT_END
928
929 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* current))
930 throw_StackOverflowError_common(current, false);
931 JRT_END
932
933 JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* current))
934 throw_StackOverflowError_common(current, true);
935 JRT_END
936
937 void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool delayed) {
938 // We avoid using the normal exception construction in this case because
939 // it performs an upcall to Java, and we're already out of stack space.
940 JavaThread* THREAD = current; // For exception macros.
941 InstanceKlass* k = vmClasses::StackOverflowError_klass();
942 oop exception_oop = k->allocate_instance(CHECK);
943 if (delayed) {
944 java_lang_Throwable::set_message(exception_oop,
945 Universe::delayed_stack_overflow_error_message());
946 }
947 Handle exception (current, exception_oop);
948 if (StackTraceInThrowable) {
949 java_lang_Throwable::fill_in_stack_trace(exception);
950 }
951 // Remove the ScopedValue bindings in case we got a
952 // StackOverflowError while we were trying to remove ScopedValue
953 // bindings.
954 current->clear_scopedValueBindings();
955 // Increment counter for hs_err file reporting
956 AtomicAccess::inc(&Exceptions::_stack_overflow_errors);
957 throw_and_post_jvmti_exception(current, exception);
958 }
959
960 address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
961 address pc,
962 ImplicitExceptionKind exception_kind)
963 {
964 address target_pc = nullptr;
965
966 if (Interpreter::contains(pc)) {
967 switch (exception_kind) {
968 case IMPLICIT_NULL: return Interpreter::throw_NullPointerException_entry();
969 case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
970 case STACK_OVERFLOW: return Interpreter::throw_StackOverflowError_entry();
971 default: ShouldNotReachHere();
972 }
973 } else {
974 switch (exception_kind) {
975 case STACK_OVERFLOW: {
976 // Stack overflow only occurs upon frame setup; the callee is
977 // going to be unwound. Dispatch to a shared runtime stub
978 // which will cause the StackOverflowError to be fabricated
979 // and processed.
980 // Stack overflow should never occur during deoptimization:
981 // the compiled method bangs the stack by as much as the
982 // interpreter would need in case of a deoptimization. The
983 // deoptimization blob and uncommon trap blob bang the stack
984 // in a debug VM to verify the correctness of the compiled
985 // method stack banging.
986 assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap");
987 Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
988 return SharedRuntime::throw_StackOverflowError_entry();
989 }
990
991 case IMPLICIT_NULL: {
992 if (VtableStubs::contains(pc)) {
993 // We haven't yet entered the callee frame. Fabricate an
994 // exception and begin dispatching it in the caller. Since
995 // the caller was at a call site, it's safe to destroy all
996 // caller-saved registers, as these entry points do.
997 VtableStub* vt_stub = VtableStubs::stub_containing(pc);
998
999 // If vt_stub is null, then return null to signal handler to report the SEGV error.
1000 if (vt_stub == nullptr) return nullptr;
1001
1002 if (vt_stub->is_abstract_method_error(pc)) {
1003 assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
1004 Events::log_exception(current, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc));
1005 // Instead of throwing the abstract method error here directly, we re-resolve
1006 // and will throw the AbstractMethodError during resolve. As a result, we'll
1007 // get a more detailed error message.
1008 return SharedRuntime::get_handle_wrong_method_stub();
1009 } else {
1010 Events::log_exception(current, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc));
1011 // Assert that the signal comes from the expected location in stub code.
1012 assert(vt_stub->is_null_pointer_exception(pc),
1013 "obtained signal from unexpected location in stub code");
1014 return SharedRuntime::throw_NullPointerException_at_call_entry();
1015 }
1016 } else {
1017 CodeBlob* cb = CodeCache::find_blob(pc);
1018
1019 // If code blob is null, then return null to signal handler to report the SEGV error.
1020 if (cb == nullptr) return nullptr;
1021
1022 // Exception happened in CodeCache. Must be either:
1023 // 1. Inline-cache check in C2I handler blob,
1024 // 2. Inline-cache check in nmethod, or
1025 // 3. Implicit null exception in nmethod
1026
1027 if (!cb->is_nmethod()) {
1028 bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
1029 if (!is_in_blob) {
1030 // Allow normal crash reporting to handle this
1031 return nullptr;
1032 }
1033 Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
1034 // There is no handler here, so we will simply unwind.
1035 return SharedRuntime::throw_NullPointerException_at_call_entry();
1036 }
1037
1038 // Otherwise, it's a compiled method. Consult its exception handlers.
1039 nmethod* nm = cb->as_nmethod();
1040 if (nm->inlinecache_check_contains(pc)) {
1041 // exception happened inside inline-cache check code
1042 // => the nmethod is not yet active (i.e., the frame
1043 // is not set up yet) => use return address pushed by
1044 // caller => don't push another return address
1045 Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc));
1046 return SharedRuntime::throw_NullPointerException_at_call_entry();
1047 }
1048
1049 if (nm->method()->is_method_handle_intrinsic()) {
1050 // exception happened inside MH dispatch code, similar to a vtable stub
1051 Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
1052 return SharedRuntime::throw_NullPointerException_at_call_entry();
1053 }
1054
1055 #ifndef PRODUCT
1056 _implicit_null_throws++;
1057 #endif
1058 target_pc = nm->continuation_for_implicit_null_exception(pc);
1059 // If there's an unexpected fault, target_pc might be null,
1060 // in which case we want to fall through into the normal
1061 // error handling code.
1062 }
1063
1064 break; // fall through
1065 }
1066
1067
1068 case IMPLICIT_DIVIDE_BY_ZERO: {
1069 nmethod* nm = CodeCache::find_nmethod(pc);
1070 guarantee(nm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions");
1071 #ifndef PRODUCT
1072 _implicit_div0_throws++;
1073 #endif
1074 target_pc = nm->continuation_for_implicit_div0_exception(pc);
1075 // If there's an unexpected fault, target_pc might be null,
1076 // in which case we want to fall through into the normal
1077 // error handling code.
1078 break; // fall through
1079 }
1080
1081 default: ShouldNotReachHere();
1082 }
1083
1084 assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
1085
1086 if (exception_kind == IMPLICIT_NULL) {
1087 #ifndef PRODUCT
1088 // for AbortVMOnException flag
1089 Exceptions::debug_check_abort("java.lang.NullPointerException");
1090 #endif //PRODUCT
1091 Events::log_exception(current, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1092 } else {
1093 #ifndef PRODUCT
1094 // for AbortVMOnException flag
1095 Exceptions::debug_check_abort("java.lang.ArithmeticException");
1096 #endif //PRODUCT
1097 Events::log_exception(current, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1098 }
1099 return target_pc;
1100 }
1101
1102 ShouldNotReachHere();
1103 return nullptr;
1104 }
1105
1106
1107 /**
1108 * Throws an java/lang/UnsatisfiedLinkError. The address of this method is
1109 * installed in the native function entry of all native Java methods before
1110 * they get linked to their actual native methods.
1111 *
1112 * \note
1113 * This method actually never gets called! The reason is because
1114 * the interpreter's native entries call NativeLookup::lookup() which
1115 * throws the exception when the lookup fails. The exception is then
1116 * caught and forwarded on the return from NativeLookup::lookup() call
1117 * before the call to the native function. This might change in the future.
1118 */
1119 JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
1120 {
1121 // We return a bad value here to make sure that the exception is
1122 // forwarded before we look at the return value.
1123 THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
1124 }
1125 JNI_END
1126
1127 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
1128 return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
1129 }
1130
1131 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
1132 #if INCLUDE_JVMCI
1133 if (!obj->klass()->has_finalizer()) {
1134 return;
1135 }
1136 #endif // INCLUDE_JVMCI
1137 assert(oopDesc::is_oop(obj), "must be a valid oop");
1138 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1139 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1140 JRT_END
1141
1142 jlong SharedRuntime::get_java_tid(JavaThread* thread) {
1143 assert(thread != nullptr, "No thread");
1144 if (thread == nullptr) {
1145 return 0;
1146 }
1147 guarantee(Thread::current() != thread || thread->is_oop_safe(),
1148 "current cannot touch oops after its GC barrier is detached.");
1149 oop obj = thread->threadObj();
1150 return (obj == nullptr) ? 0 : java_lang_Thread::thread_id(obj);
1151 }
1152
1153 /**
1154 * This function ought to be a void function, but cannot be because
1155 * it gets turned into a tail-call on sparc, which runs into dtrace bug
1156 * 6254741. Once that is fixed we can remove the dummy return value.
1157 */
1158 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
1159 return dtrace_object_alloc(JavaThread::current(), o, o->size());
1160 }
1161
1162 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o) {
1163 return dtrace_object_alloc(thread, o, o->size());
1164 }
1165
1166 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size) {
1167 assert(DTraceAllocProbes, "wrong call");
1168 Klass* klass = o->klass();
1169 Symbol* name = klass->name();
1170 HOTSPOT_OBJECT_ALLOC(
1171 get_java_tid(thread),
1172 (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
1173 return 0;
1174 }
1175
1176 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
1177 JavaThread* current, Method* method))
1178 assert(current == JavaThread::current(), "pre-condition");
1179
1180 assert(DTraceMethodProbes, "wrong call");
1181 Symbol* kname = method->klass_name();
1182 Symbol* name = method->name();
1183 Symbol* sig = method->signature();
1184 HOTSPOT_METHOD_ENTRY(
1185 get_java_tid(current),
1186 (char *) kname->bytes(), kname->utf8_length(),
1187 (char *) name->bytes(), name->utf8_length(),
1188 (char *) sig->bytes(), sig->utf8_length());
1189 return 0;
1190 JRT_END
1191
1192 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
1193 JavaThread* current, Method* method))
1194 assert(current == JavaThread::current(), "pre-condition");
1195 assert(DTraceMethodProbes, "wrong call");
1196 Symbol* kname = method->klass_name();
1197 Symbol* name = method->name();
1198 Symbol* sig = method->signature();
1199 HOTSPOT_METHOD_RETURN(
1200 get_java_tid(current),
1201 (char *) kname->bytes(), kname->utf8_length(),
1202 (char *) name->bytes(), name->utf8_length(),
1203 (char *) sig->bytes(), sig->utf8_length());
1204 return 0;
1205 JRT_END
1206
1207
1208 // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode)
1209 // for a call current in progress, i.e., arguments has been pushed on stack
1210 // put callee has not been invoked yet. Used by: resolve virtual/static,
1211 // vtable updates, etc. Caller frame must be compiled.
1212 Handle SharedRuntime::find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
1213 JavaThread* current = THREAD;
1214 ResourceMark rm(current);
1215
1216 // last java frame on stack (which includes native call frames)
1217 vframeStream vfst(current, true); // Do not skip and javaCalls
1218
1219 return find_callee_info_helper(vfst, bc, callinfo, THREAD);
1220 }
1221
1222 Method* SharedRuntime::extract_attached_method(vframeStream& vfst) {
1223 nmethod* caller = vfst.nm();
1224
1225 address pc = vfst.frame_pc();
1226 { // Get call instruction under lock because another thread may be busy patching it.
1227 CompiledICLocker ic_locker(caller);
1228 return caller->attached_method_before_pc(pc);
1229 }
1230 return nullptr;
1231 }
1232
1233 // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
1234 // for a call current in progress, i.e., arguments has been pushed on stack
1235 // but callee has not been invoked yet. Caller frame must be compiled.
1236 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1237 CallInfo& callinfo, TRAPS) {
1238 Handle receiver;
1239 Handle nullHandle; // create a handy null handle for exception returns
1240 JavaThread* current = THREAD;
1241
1242 assert(!vfst.at_end(), "Java frame must exist");
1243
1244 // Find caller and bci from vframe
1245 methodHandle caller(current, vfst.method());
1246 int bci = vfst.bci();
1247
1248 if (caller->is_continuation_enter_intrinsic()) {
1249 bc = Bytecodes::_invokestatic;
1250 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1251 return receiver;
1252 }
1253
1254 Bytecode_invoke bytecode(caller, bci);
1255 int bytecode_index = bytecode.index();
1256 bc = bytecode.invoke_code();
1257
1258 methodHandle attached_method(current, extract_attached_method(vfst));
1259 if (attached_method.not_null()) {
1260 Method* callee = bytecode.static_target(CHECK_NH);
1261 vmIntrinsics::ID id = callee->intrinsic_id();
1262 // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1263 // it attaches statically resolved method to the call site.
1264 if (MethodHandles::is_signature_polymorphic(id) &&
1265 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1266 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1267
1268 // Adjust invocation mode according to the attached method.
1269 switch (bc) {
1270 case Bytecodes::_invokevirtual:
1271 if (attached_method->method_holder()->is_interface()) {
1272 bc = Bytecodes::_invokeinterface;
1273 }
1274 break;
1275 case Bytecodes::_invokeinterface:
1276 if (!attached_method->method_holder()->is_interface()) {
1277 bc = Bytecodes::_invokevirtual;
1278 }
1279 break;
1280 case Bytecodes::_invokehandle:
1281 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1282 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1283 : Bytecodes::_invokevirtual;
1284 }
1285 break;
1286 default:
1287 break;
1288 }
1289 }
1290 }
1291
1292 assert(bc != Bytecodes::_illegal, "not initialized");
1293
1294 bool has_receiver = bc != Bytecodes::_invokestatic &&
1295 bc != Bytecodes::_invokedynamic &&
1296 bc != Bytecodes::_invokehandle;
1297
1298 // Find receiver for non-static call
1299 if (has_receiver) {
1300 // This register map must be update since we need to find the receiver for
1301 // compiled frames. The receiver might be in a register.
1302 RegisterMap reg_map2(current,
1303 RegisterMap::UpdateMap::include,
1304 RegisterMap::ProcessFrames::include,
1305 RegisterMap::WalkContinuation::skip);
1306 frame stubFrame = current->last_frame();
1307 // Caller-frame is a compiled frame
1308 frame callerFrame = stubFrame.sender(®_map2);
1309
1310 if (attached_method.is_null()) {
1311 Method* callee = bytecode.static_target(CHECK_NH);
1312 if (callee == nullptr) {
1313 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1314 }
1315 }
1316
1317 // Retrieve from a compiled argument list
1318 receiver = Handle(current, callerFrame.retrieve_receiver(®_map2));
1319 assert(oopDesc::is_oop_or_null(receiver()), "");
1320
1321 if (receiver.is_null()) {
1322 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1323 }
1324 }
1325
1326 // Resolve method
1327 if (attached_method.not_null()) {
1328 // Parameterized by attached method.
1329 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1330 } else {
1331 // Parameterized by bytecode.
1332 constantPoolHandle constants(current, caller->constants());
1333 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1334 }
1335
1336 #ifdef ASSERT
1337 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1338 if (has_receiver) {
1339 assert(receiver.not_null(), "should have thrown exception");
1340 Klass* receiver_klass = receiver->klass();
1341 Klass* rk = nullptr;
1342 if (attached_method.not_null()) {
1343 // In case there's resolved method attached, use its holder during the check.
1344 rk = attached_method->method_holder();
1345 } else {
1346 // Klass is already loaded.
1347 constantPoolHandle constants(current, caller->constants());
1348 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1349 }
1350 Klass* static_receiver_klass = rk;
1351 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1352 "actual receiver must be subclass of static receiver klass");
1353 if (receiver_klass->is_instance_klass()) {
1354 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1355 tty->print_cr("ERROR: Klass not yet initialized!!");
1356 receiver_klass->print();
1357 }
1358 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1359 }
1360 }
1361 #endif
1362
1363 return receiver;
1364 }
1365
1366 methodHandle SharedRuntime::find_callee_method(TRAPS) {
1367 JavaThread* current = THREAD;
1368 ResourceMark rm(current);
1369 // We need first to check if any Java activations (compiled, interpreted)
1370 // exist on the stack since last JavaCall. If not, we need
1371 // to get the target method from the JavaCall wrapper.
1372 vframeStream vfst(current, true); // Do not skip any javaCalls
1373 methodHandle callee_method;
1374 if (vfst.at_end()) {
1375 // No Java frames were found on stack since we did the JavaCall.
1376 // Hence the stack can only contain an entry_frame. We need to
1377 // find the target method from the stub frame.
1378 RegisterMap reg_map(current,
1379 RegisterMap::UpdateMap::skip,
1380 RegisterMap::ProcessFrames::include,
1381 RegisterMap::WalkContinuation::skip);
1382 frame fr = current->last_frame();
1383 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1384 fr = fr.sender(®_map);
1385 assert(fr.is_entry_frame(), "must be");
1386 // fr is now pointing to the entry frame.
1387 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1388 } else {
1389 Bytecodes::Code bc;
1390 CallInfo callinfo;
1391 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1392 callee_method = methodHandle(current, callinfo.selected_method());
1393 }
1394 assert(callee_method()->is_method(), "must be");
1395 return callee_method;
1396 }
1397
1398 // Resolves a call.
1399 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1400 JavaThread* current = THREAD;
1401 ResourceMark rm(current);
1402 RegisterMap cbl_map(current,
1403 RegisterMap::UpdateMap::skip,
1404 RegisterMap::ProcessFrames::include,
1405 RegisterMap::WalkContinuation::skip);
1406 frame caller_frame = current->last_frame().sender(&cbl_map);
1407
1408 CodeBlob* caller_cb = caller_frame.cb();
1409 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1410 nmethod* caller_nm = caller_cb->as_nmethod();
1411
1412 // determine call info & receiver
1413 // note: a) receiver is null for static calls
1414 // b) an exception is thrown if receiver is null for non-static calls
1415 CallInfo call_info;
1416 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1417 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1418
1419 NoSafepointVerifier nsv;
1420
1421 methodHandle callee_method(current, call_info.selected_method());
1422
1423 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1424 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1425 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1426 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1427 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1428
1429 assert(!caller_nm->is_unloading(), "It should not be unloading");
1430
1431 #ifndef PRODUCT
1432 // tracing/debugging/statistics
1433 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1434 (is_virtual) ? (&_resolve_virtual_ctr) :
1435 (&_resolve_static_ctr);
1436 AtomicAccess::inc(addr);
1437
1438 if (TraceCallFixup) {
1439 ResourceMark rm(current);
1440 tty->print("resolving %s%s (%s) call to",
1441 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1442 Bytecodes::name(invoke_code));
1443 callee_method->print_short_name(tty);
1444 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1445 p2i(caller_frame.pc()), p2i(callee_method->code()));
1446 }
1447 #endif
1448
1449 if (invoke_code == Bytecodes::_invokestatic) {
1450 assert(callee_method->method_holder()->is_initialized() ||
1451 callee_method->method_holder()->is_reentrant_initialization(current),
1452 "invalid class initialization state for invoke_static");
1453 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1454 // In order to keep class initialization check, do not patch call
1455 // site for static call when the class is not fully initialized.
1456 // Proper check is enforced by call site re-resolution on every invocation.
1457 //
1458 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1459 // explicit class initialization check is put in nmethod entry (VEP).
1460 assert(callee_method->method_holder()->is_linked(), "must be");
1461 return callee_method;
1462 }
1463 }
1464
1465
1466 // JSR 292 key invariant:
1467 // If the resolved method is a MethodHandle invoke target, the call
1468 // site must be a MethodHandle call site, because the lambda form might tail-call
1469 // leaving the stack in a state unknown to either caller or callee
1470
1471 // Compute entry points. The computation of the entry points is independent of
1472 // patching the call.
1473
1474 // Make sure the callee nmethod does not get deoptimized and removed before
1475 // we are done patching the code.
1476
1477
1478 CompiledICLocker ml(caller_nm);
1479 if (is_virtual && !is_optimized) {
1480 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1481 inline_cache->update(&call_info, receiver->klass());
1482 } else {
1483 // Callsite is a direct call - set it to the destination method
1484 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1485 callsite->set(callee_method);
1486 }
1487
1488 return callee_method;
1489 }
1490
1491 // Inline caches exist only in compiled code
1492 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1493 #ifdef ASSERT
1494 RegisterMap reg_map(current,
1495 RegisterMap::UpdateMap::skip,
1496 RegisterMap::ProcessFrames::include,
1497 RegisterMap::WalkContinuation::skip);
1498 frame stub_frame = current->last_frame();
1499 assert(stub_frame.is_runtime_frame(), "sanity check");
1500 frame caller_frame = stub_frame.sender(®_map);
1501 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1502 #endif /* ASSERT */
1503
1504 methodHandle callee_method;
1505 JRT_BLOCK
1506 callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1507 // Return Method* through TLS
1508 current->set_vm_result_metadata(callee_method());
1509 JRT_BLOCK_END
1510 // return compiled code entry point after potential safepoints
1511 return get_resolved_entry(current, callee_method);
1512 JRT_END
1513
1514
1515 // Handle call site that has been made non-entrant
1516 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1517 // 6243940 We might end up in here if the callee is deoptimized
1518 // as we race to call it. We don't want to take a safepoint if
1519 // the caller was interpreted because the caller frame will look
1520 // interpreted to the stack walkers and arguments are now
1521 // "compiled" so it is much better to make this transition
1522 // invisible to the stack walking code. The i2c path will
1523 // place the callee method in the callee_target. It is stashed
1524 // there because if we try and find the callee by normal means a
1525 // safepoint is possible and have trouble gc'ing the compiled args.
1526 RegisterMap reg_map(current,
1527 RegisterMap::UpdateMap::skip,
1528 RegisterMap::ProcessFrames::include,
1529 RegisterMap::WalkContinuation::skip);
1530 frame stub_frame = current->last_frame();
1531 assert(stub_frame.is_runtime_frame(), "sanity check");
1532 frame caller_frame = stub_frame.sender(®_map);
1533
1534 if (caller_frame.is_interpreted_frame() ||
1535 caller_frame.is_entry_frame() ||
1536 caller_frame.is_upcall_stub_frame()) {
1537 Method* callee = current->callee_target();
1538 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1539 current->set_vm_result_metadata(callee);
1540 current->set_callee_target(nullptr);
1541 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1542 // Bypass class initialization checks in c2i when caller is in native.
1543 // JNI calls to static methods don't have class initialization checks.
1544 // Fast class initialization checks are present in c2i adapters and call into
1545 // SharedRuntime::handle_wrong_method() on the slow path.
1546 //
1547 // JVM upcalls may land here as well, but there's a proper check present in
1548 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1549 // so bypassing it in c2i adapter is benign.
1550 return callee->get_c2i_no_clinit_check_entry();
1551 } else {
1552 return callee->get_c2i_entry();
1553 }
1554 }
1555
1556 // Must be compiled to compiled path which is safe to stackwalk
1557 methodHandle callee_method;
1558 JRT_BLOCK
1559 // Force resolving of caller (if we called from compiled frame)
1560 callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1561 current->set_vm_result_metadata(callee_method());
1562 JRT_BLOCK_END
1563 // return compiled code entry point after potential safepoints
1564 return get_resolved_entry(current, callee_method);
1565 JRT_END
1566
1567 // Handle abstract method call
1568 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1569 // Verbose error message for AbstractMethodError.
1570 // Get the called method from the invoke bytecode.
1571 vframeStream vfst(current, true);
1572 assert(!vfst.at_end(), "Java frame must exist");
1573 methodHandle caller(current, vfst.method());
1574 Bytecode_invoke invoke(caller, vfst.bci());
1575 DEBUG_ONLY( invoke.verify(); )
1576
1577 // Find the compiled caller frame.
1578 RegisterMap reg_map(current,
1579 RegisterMap::UpdateMap::include,
1580 RegisterMap::ProcessFrames::include,
1581 RegisterMap::WalkContinuation::skip);
1582 frame stubFrame = current->last_frame();
1583 assert(stubFrame.is_runtime_frame(), "must be");
1584 frame callerFrame = stubFrame.sender(®_map);
1585 assert(callerFrame.is_compiled_frame(), "must be");
1586
1587 // Install exception and return forward entry.
1588 address res = SharedRuntime::throw_AbstractMethodError_entry();
1589 JRT_BLOCK
1590 methodHandle callee(current, invoke.static_target(current));
1591 if (!callee.is_null()) {
1592 oop recv = callerFrame.retrieve_receiver(®_map);
1593 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1594 res = StubRoutines::forward_exception_entry();
1595 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1596 }
1597 JRT_BLOCK_END
1598 return res;
1599 JRT_END
1600
1601 // return verified_code_entry if interp_only_mode is not set for the current thread;
1602 // otherwise return c2i entry.
1603 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
1604 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1605 // In interp_only_mode we need to go to the interpreted entry
1606 // The c2i won't patch in this mode -- see fixup_callers_callsite
1607 return callee_method->get_c2i_entry();
1608 }
1609 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1610 return callee_method->verified_code_entry();
1611 }
1612
1613 // resolve a static call and patch code
1614 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1615 methodHandle callee_method;
1616 bool enter_special = false;
1617 JRT_BLOCK
1618 callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1619 current->set_vm_result_metadata(callee_method());
1620 JRT_BLOCK_END
1621 // return compiled code entry point after potential safepoints
1622 return get_resolved_entry(current, callee_method);
1623 JRT_END
1624
1625 // resolve virtual call and update inline cache to monomorphic
1626 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1627 methodHandle callee_method;
1628 JRT_BLOCK
1629 callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1630 current->set_vm_result_metadata(callee_method());
1631 JRT_BLOCK_END
1632 // return compiled code entry point after potential safepoints
1633 return get_resolved_entry(current, callee_method);
1634 JRT_END
1635
1636
1637 // Resolve a virtual call that can be statically bound (e.g., always
1638 // monomorphic, so it has no inline cache). Patch code to resolved target.
1639 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1640 methodHandle callee_method;
1641 JRT_BLOCK
1642 callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1643 current->set_vm_result_metadata(callee_method());
1644 JRT_BLOCK_END
1645 // return compiled code entry point after potential safepoints
1646 return get_resolved_entry(current, callee_method);
1647 JRT_END
1648
1649 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1650 JavaThread* current = THREAD;
1651 ResourceMark rm(current);
1652 CallInfo call_info;
1653 Bytecodes::Code bc;
1654
1655 // receiver is null for static calls. An exception is thrown for null
1656 // receivers for non-static calls
1657 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1658
1659 methodHandle callee_method(current, call_info.selected_method());
1660
1661 #ifndef PRODUCT
1662 AtomicAccess::inc(&_ic_miss_ctr);
1663
1664 // Statistics & Tracing
1665 if (TraceCallFixup) {
1666 ResourceMark rm(current);
1667 tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1668 callee_method->print_short_name(tty);
1669 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1670 }
1671
1672 if (ICMissHistogram) {
1673 MutexLocker m(VMStatistic_lock);
1674 RegisterMap reg_map(current,
1675 RegisterMap::UpdateMap::skip,
1676 RegisterMap::ProcessFrames::include,
1677 RegisterMap::WalkContinuation::skip);
1678 frame f = current->last_frame().real_sender(®_map);// skip runtime stub
1679 // produce statistics under the lock
1680 trace_ic_miss(f.pc());
1681 }
1682 #endif
1683
1684 // install an event collector so that when a vtable stub is created the
1685 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1686 // event can't be posted when the stub is created as locks are held
1687 // - instead the event will be deferred until the event collector goes
1688 // out of scope.
1689 JvmtiDynamicCodeEventCollector event_collector;
1690
1691 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1692 RegisterMap reg_map(current,
1693 RegisterMap::UpdateMap::skip,
1694 RegisterMap::ProcessFrames::include,
1695 RegisterMap::WalkContinuation::skip);
1696 frame caller_frame = current->last_frame().sender(®_map);
1697 CodeBlob* cb = caller_frame.cb();
1698 nmethod* caller_nm = cb->as_nmethod();
1699
1700 CompiledICLocker ml(caller_nm);
1701 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1702 inline_cache->update(&call_info, receiver()->klass());
1703
1704 return callee_method;
1705 }
1706
1707 //
1708 // Resets a call-site in compiled code so it will get resolved again.
1709 // This routines handles both virtual call sites, optimized virtual call
1710 // sites, and static call sites. Typically used to change a call sites
1711 // destination from compiled to interpreted.
1712 //
1713 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1714 JavaThread* current = THREAD;
1715 ResourceMark rm(current);
1716 RegisterMap reg_map(current,
1717 RegisterMap::UpdateMap::skip,
1718 RegisterMap::ProcessFrames::include,
1719 RegisterMap::WalkContinuation::skip);
1720 frame stub_frame = current->last_frame();
1721 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1722 frame caller = stub_frame.sender(®_map);
1723
1724 // Do nothing if the frame isn't a live compiled frame.
1725 // nmethod could be deoptimized by the time we get here
1726 // so no update to the caller is needed.
1727
1728 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1729 (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1730
1731 address pc = caller.pc();
1732
1733 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1734 assert(caller_nm != nullptr, "did not find caller nmethod");
1735
1736 // Default call_addr is the location of the "basic" call.
1737 // Determine the address of the call we a reresolving. With
1738 // Inline Caches we will always find a recognizable call.
1739 // With Inline Caches disabled we may or may not find a
1740 // recognizable call. We will always find a call for static
1741 // calls and for optimized virtual calls. For vanilla virtual
1742 // calls it depends on the state of the UseInlineCaches switch.
1743 //
1744 // With Inline Caches disabled we can get here for a virtual call
1745 // for two reasons:
1746 // 1 - calling an abstract method. The vtable for abstract methods
1747 // will run us thru handle_wrong_method and we will eventually
1748 // end up in the interpreter to throw the ame.
1749 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1750 // call and between the time we fetch the entry address and
1751 // we jump to it the target gets deoptimized. Similar to 1
1752 // we will wind up in the interprter (thru a c2i with c2).
1753 //
1754 CompiledICLocker ml(caller_nm);
1755 address call_addr = caller_nm->call_instruction_address(pc);
1756
1757 if (call_addr != nullptr) {
1758 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1759 // bytes back in the instruction stream so we must also check for reloc info.
1760 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1761 bool ret = iter.next(); // Get item
1762 if (ret) {
1763 switch (iter.type()) {
1764 case relocInfo::static_call_type:
1765 case relocInfo::opt_virtual_call_type: {
1766 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1767 cdc->set_to_clean();
1768 break;
1769 }
1770
1771 case relocInfo::virtual_call_type: {
1772 // compiled, dispatched call (which used to call an interpreted method)
1773 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1774 inline_cache->set_to_clean();
1775 break;
1776 }
1777 default:
1778 break;
1779 }
1780 }
1781 }
1782 }
1783
1784 methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1785
1786
1787 #ifndef PRODUCT
1788 AtomicAccess::inc(&_wrong_method_ctr);
1789
1790 if (TraceCallFixup) {
1791 ResourceMark rm(current);
1792 tty->print("handle_wrong_method reresolving call to");
1793 callee_method->print_short_name(tty);
1794 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1795 }
1796 #endif
1797
1798 return callee_method;
1799 }
1800
1801 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1802 // The faulting unsafe accesses should be changed to throw the error
1803 // synchronously instead. Meanwhile the faulting instruction will be
1804 // skipped over (effectively turning it into a no-op) and an
1805 // asynchronous exception will be raised which the thread will
1806 // handle at a later point. If the instruction is a load it will
1807 // return garbage.
1808
1809 // Request an async exception.
1810 thread->set_pending_unsafe_access_error();
1811
1812 // Return address of next instruction to execute.
1813 return next_pc;
1814 }
1815
1816 #ifdef ASSERT
1817 void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
1818 const BasicType* sig_bt,
1819 const VMRegPair* regs) {
1820 ResourceMark rm;
1821 const int total_args_passed = method->size_of_parameters();
1822 const VMRegPair* regs_with_member_name = regs;
1823 VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
1824
1825 const int member_arg_pos = total_args_passed - 1;
1826 assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1827 assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1828
1829 java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
1830
1831 for (int i = 0; i < member_arg_pos; i++) {
1832 VMReg a = regs_with_member_name[i].first();
1833 VMReg b = regs_without_member_name[i].first();
1834 assert(a->value() == b->value(), "register allocation mismatch: a= %d, b= %d", a->value(), b->value());
1835 }
1836 assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
1837 }
1838 #endif
1839
1840 // ---------------------------------------------------------------------------
1841 // We are calling the interpreter via a c2i. Normally this would mean that
1842 // we were called by a compiled method. However we could have lost a race
1843 // where we went int -> i2c -> c2i and so the caller could in fact be
1844 // interpreted. If the caller is compiled we attempt to patch the caller
1845 // so he no longer calls into the interpreter.
1846 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1847 AARCH64_PORT_ONLY(assert(pauth_ptr_is_raw(caller_pc), "should be raw"));
1848
1849 // It's possible that deoptimization can occur at a call site which hasn't
1850 // been resolved yet, in which case this function will be called from
1851 // an nmethod that has been patched for deopt and we can ignore the
1852 // request for a fixup.
1853 // Also it is possible that we lost a race in that from_compiled_entry
1854 // is now back to the i2c in that case we don't need to patch and if
1855 // we did we'd leap into space because the callsite needs to use
1856 // "to interpreter" stub in order to load up the Method*. Don't
1857 // ask me how I know this...
1858
1859 // Result from nmethod::is_unloading is not stable across safepoints.
1860 NoSafepointVerifier nsv;
1861
1862 nmethod* callee = method->code();
1863 if (callee == nullptr) {
1864 return;
1865 }
1866
1867 // write lock needed because we might patch call site by set_to_clean()
1868 // and is_unloading() can modify nmethod's state
1869 MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, JavaThread::current()));
1870
1871 CodeBlob* cb = CodeCache::find_blob(caller_pc);
1872 if (cb == nullptr || !cb->is_nmethod() || !callee->is_in_use() || callee->is_unloading()) {
1873 return;
1874 }
1875
1876 // The check above makes sure this is an nmethod.
1877 nmethod* caller = cb->as_nmethod();
1878
1879 // Get the return PC for the passed caller PC.
1880 address return_pc = caller_pc + frame::pc_return_offset;
1881
1882 if (!caller->is_in_use() || !NativeCall::is_call_before(return_pc)) {
1883 return;
1884 }
1885
1886 // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
1887 CompiledICLocker ic_locker(caller);
1888 ResourceMark rm;
1889
1890 // If we got here through a static call or opt_virtual call, then we know where the
1891 // call address would be; let's peek at it
1892 address callsite_addr = (address)nativeCall_before(return_pc);
1893 RelocIterator iter(caller, callsite_addr, callsite_addr + 1);
1894 if (!iter.next()) {
1895 // No reloc entry found; not a static or optimized virtual call
1896 return;
1897 }
1898
1899 relocInfo::relocType type = iter.reloc()->type();
1900 if (type != relocInfo::static_call_type &&
1901 type != relocInfo::opt_virtual_call_type) {
1902 return;
1903 }
1904
1905 CompiledDirectCall* callsite = CompiledDirectCall::before(return_pc);
1906 callsite->set_to_clean();
1907 JRT_END
1908
1909
1910 // same as JVM_Arraycopy, but called directly from compiled code
1911 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
1912 oopDesc* dest, jint dest_pos,
1913 jint length,
1914 JavaThread* current)) {
1915 #ifndef PRODUCT
1916 _slow_array_copy_ctr++;
1917 #endif
1918 // Check if we have null pointers
1919 if (src == nullptr || dest == nullptr) {
1920 THROW(vmSymbols::java_lang_NullPointerException());
1921 }
1922 // Do the copy. The casts to arrayOop are necessary to the copy_array API,
1923 // even though the copy_array API also performs dynamic checks to ensure
1924 // that src and dest are truly arrays (and are conformable).
1925 // The copy_array mechanism is awkward and could be removed, but
1926 // the compilers don't call this function except as a last resort,
1927 // so it probably doesn't matter.
1928 src->klass()->copy_array((arrayOopDesc*)src, src_pos,
1929 (arrayOopDesc*)dest, dest_pos,
1930 length, current);
1931 }
1932 JRT_END
1933
1934 // The caller of generate_class_cast_message() (or one of its callers)
1935 // must use a ResourceMark in order to correctly free the result.
1936 char* SharedRuntime::generate_class_cast_message(
1937 JavaThread* thread, Klass* caster_klass) {
1938
1939 // Get target class name from the checkcast instruction
1940 vframeStream vfst(thread, true);
1941 assert(!vfst.at_end(), "Java frame must exist");
1942 Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
1943 constantPoolHandle cpool(thread, vfst.method()->constants());
1944 Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index());
1945 Symbol* target_klass_name = nullptr;
1946 if (target_klass == nullptr) {
1947 // This klass should be resolved, but just in case, get the name in the klass slot.
1948 target_klass_name = cpool->klass_name_at(cc.index());
1949 }
1950 return generate_class_cast_message(caster_klass, target_klass, target_klass_name);
1951 }
1952
1953
1954 // The caller of generate_class_cast_message() (or one of its callers)
1955 // must use a ResourceMark in order to correctly free the result.
1956 char* SharedRuntime::generate_class_cast_message(
1957 Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) {
1958 const char* caster_name = caster_klass->external_name();
1959
1960 assert(target_klass != nullptr || target_klass_name != nullptr, "one must be provided");
1961 const char* target_name = target_klass == nullptr ? target_klass_name->as_klass_external_name() :
1962 target_klass->external_name();
1963
1964 size_t msglen = strlen(caster_name) + strlen("class ") + strlen(" cannot be cast to class ") + strlen(target_name) + 1;
1965
1966 const char* caster_klass_description = "";
1967 const char* target_klass_description = "";
1968 const char* klass_separator = "";
1969 if (target_klass != nullptr && caster_klass->module() == target_klass->module()) {
1970 caster_klass_description = caster_klass->joint_in_module_of_loader(target_klass);
1971 } else {
1972 caster_klass_description = caster_klass->class_in_module_of_loader();
1973 target_klass_description = (target_klass != nullptr) ? target_klass->class_in_module_of_loader() : "";
1974 klass_separator = (target_klass != nullptr) ? "; " : "";
1975 }
1976
1977 // add 3 for parenthesis and preceding space
1978 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1979
1980 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1981 if (message == nullptr) {
1982 // Shouldn't happen, but don't cause even more problems if it does
1983 message = const_cast<char*>(caster_klass->external_name());
1984 } else {
1985 jio_snprintf(message,
1986 msglen,
1987 "class %s cannot be cast to class %s (%s%s%s)",
1988 caster_name,
1989 target_name,
1990 caster_klass_description,
1991 klass_separator,
1992 target_klass_description
1993 );
1994 }
1995 return message;
1996 }
1997
1998 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1999 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2000 JRT_END
2001
2002 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2003 if (!SafepointSynchronize::is_synchronizing()) {
2004 // Only try quick_enter() if we're not trying to reach a safepoint
2005 // so that the calling thread reaches the safepoint more quickly.
2006 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2007 return;
2008 }
2009 }
2010 // NO_ASYNC required because an async exception on the state transition destructor
2011 // would leave you with the lock held and it would never be released.
2012 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2013 // and the model is that an exception implies the method failed.
2014 JRT_BLOCK_NO_ASYNC
2015 Handle h_obj(THREAD, obj);
2016 ObjectSynchronizer::enter(h_obj, lock, current);
2017 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2018 JRT_BLOCK_END
2019 }
2020
2021 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
2022 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2023 SharedRuntime::monitor_enter_helper(obj, lock, current);
2024 JRT_END
2025
2026 void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2027 assert(JavaThread::current() == current, "invariant");
2028 // Exit must be non-blocking, and therefore no exceptions can be thrown.
2029 ExceptionMark em(current);
2030
2031 // Check if C2_MacroAssembler::fast_unlock() or
2032 // C2_MacroAssembler::fast_unlock_lightweight() unlocked an inflated
2033 // monitor before going slow path. Since there is no safepoint
2034 // polling when calling into the VM, we can be sure that the monitor
2035 // hasn't been deallocated.
2036 ObjectMonitor* m = current->unlocked_inflated_monitor();
2037 if (m != nullptr) {
2038 assert(!m->has_owner(current), "must be");
2039 current->clear_unlocked_inflated_monitor();
2040
2041 // We need to reacquire the lock before we can call ObjectSynchronizer::exit().
2042 if (!m->try_enter(current, /*check_for_recursion*/ false)) {
2043 // Some other thread acquired the lock (or the monitor was
2044 // deflated). Either way we are done.
2045 return;
2046 }
2047 }
2048
2049 // The object could become unlocked through a JNI call, which we have no other checks for.
2050 // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
2051 if (obj->is_unlocked()) {
2052 if (CheckJNICalls) {
2053 fatal("Object has been unlocked by JNI");
2054 }
2055 return;
2056 }
2057 ObjectSynchronizer::exit(obj, lock, current);
2058 }
2059
2060 // Handles the uncommon cases of monitor unlocking in compiled code
2061 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2062 assert(current == JavaThread::current(), "pre-condition");
2063 SharedRuntime::monitor_exit_helper(obj, lock, current);
2064 JRT_END
2065
2066 #ifndef PRODUCT
2067
2068 void SharedRuntime::print_statistics() {
2069 ttyLocker ttyl;
2070 if (xtty != nullptr) xtty->head("statistics type='SharedRuntime'");
2071
2072 SharedRuntime::print_ic_miss_histogram();
2073
2074 // Dump the JRT_ENTRY counters
2075 if (_new_instance_ctr) tty->print_cr("%5u new instance requires GC", _new_instance_ctr);
2076 if (_new_array_ctr) tty->print_cr("%5u new array requires GC", _new_array_ctr);
2077 if (_multi2_ctr) tty->print_cr("%5u multianewarray 2 dim", _multi2_ctr);
2078 if (_multi3_ctr) tty->print_cr("%5u multianewarray 3 dim", _multi3_ctr);
2079 if (_multi4_ctr) tty->print_cr("%5u multianewarray 4 dim", _multi4_ctr);
2080 if (_multi5_ctr) tty->print_cr("%5u multianewarray 5 dim", _multi5_ctr);
2081
2082 tty->print_cr("%5u inline cache miss in compiled", _ic_miss_ctr);
2083 tty->print_cr("%5u wrong method", _wrong_method_ctr);
2084 tty->print_cr("%5u unresolved static call site", _resolve_static_ctr);
2085 tty->print_cr("%5u unresolved virtual call site", _resolve_virtual_ctr);
2086 tty->print_cr("%5u unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2087
2088 if (_mon_enter_stub_ctr) tty->print_cr("%5u monitor enter stub", _mon_enter_stub_ctr);
2089 if (_mon_exit_stub_ctr) tty->print_cr("%5u monitor exit stub", _mon_exit_stub_ctr);
2090 if (_mon_enter_ctr) tty->print_cr("%5u monitor enter slow", _mon_enter_ctr);
2091 if (_mon_exit_ctr) tty->print_cr("%5u monitor exit slow", _mon_exit_ctr);
2092 if (_partial_subtype_ctr) tty->print_cr("%5u slow partial subtype", _partial_subtype_ctr);
2093 if (_jbyte_array_copy_ctr) tty->print_cr("%5u byte array copies", _jbyte_array_copy_ctr);
2094 if (_jshort_array_copy_ctr) tty->print_cr("%5u short array copies", _jshort_array_copy_ctr);
2095 if (_jint_array_copy_ctr) tty->print_cr("%5u int array copies", _jint_array_copy_ctr);
2096 if (_jlong_array_copy_ctr) tty->print_cr("%5u long array copies", _jlong_array_copy_ctr);
2097 if (_oop_array_copy_ctr) tty->print_cr("%5u oop array copies", _oop_array_copy_ctr);
2098 if (_checkcast_array_copy_ctr) tty->print_cr("%5u checkcast array copies", _checkcast_array_copy_ctr);
2099 if (_unsafe_array_copy_ctr) tty->print_cr("%5u unsafe array copies", _unsafe_array_copy_ctr);
2100 if (_generic_array_copy_ctr) tty->print_cr("%5u generic array copies", _generic_array_copy_ctr);
2101 if (_slow_array_copy_ctr) tty->print_cr("%5u slow array copies", _slow_array_copy_ctr);
2102 if (_find_handler_ctr) tty->print_cr("%5u find exception handler", _find_handler_ctr);
2103 if (_rethrow_ctr) tty->print_cr("%5u rethrow handler", _rethrow_ctr);
2104 if (_unsafe_set_memory_ctr) tty->print_cr("%5u unsafe set memorys", _unsafe_set_memory_ctr);
2105
2106 AdapterHandlerLibrary::print_statistics();
2107
2108 if (xtty != nullptr) xtty->tail("statistics");
2109 }
2110
2111 inline double percent(int64_t x, int64_t y) {
2112 return 100.0 * (double)x / (double)MAX2(y, (int64_t)1);
2113 }
2114
2115 class MethodArityHistogram {
2116 public:
2117 enum { MAX_ARITY = 256 };
2118 private:
2119 static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args
2120 static uint64_t _size_histogram[MAX_ARITY]; // histogram of arg size in words
2121 static uint64_t _total_compiled_calls;
2122 static uint64_t _max_compiled_calls_per_method;
2123 static int _max_arity; // max. arity seen
2124 static int _max_size; // max. arg size seen
2125
2126 static void add_method_to_histogram(nmethod* nm) {
2127 Method* method = (nm == nullptr) ? nullptr : nm->method();
2128 if (method != nullptr) {
2129 ArgumentCount args(method->signature());
2130 int arity = args.size() + (method->is_static() ? 0 : 1);
2131 int argsize = method->size_of_parameters();
2132 arity = MIN2(arity, MAX_ARITY-1);
2133 argsize = MIN2(argsize, MAX_ARITY-1);
2134 uint64_t count = (uint64_t)method->compiled_invocation_count();
2135 _max_compiled_calls_per_method = count > _max_compiled_calls_per_method ? count : _max_compiled_calls_per_method;
2136 _total_compiled_calls += count;
2137 _arity_histogram[arity] += count;
2138 _size_histogram[argsize] += count;
2139 _max_arity = MAX2(_max_arity, arity);
2140 _max_size = MAX2(_max_size, argsize);
2141 }
2142 }
2143
2144 void print_histogram_helper(int n, uint64_t* histo, const char* name) {
2145 const int N = MIN2(9, n);
2146 double sum = 0;
2147 double weighted_sum = 0;
2148 for (int i = 0; i <= n; i++) { sum += (double)histo[i]; weighted_sum += (double)(i*histo[i]); }
2149 if (sum >= 1) { // prevent divide by zero or divide overflow
2150 double rest = sum;
2151 double percent = sum / 100;
2152 for (int i = 0; i <= N; i++) {
2153 rest -= (double)histo[i];
2154 tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], (double)histo[i] / percent);
2155 }
2156 tty->print_cr("rest: " INT64_FORMAT_W(12) " (%5.1f%%)", (int64_t)rest, rest / percent);
2157 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2158 tty->print_cr("(total # of compiled calls = " INT64_FORMAT_W(14) ")", _total_compiled_calls);
2159 tty->print_cr("(max # of compiled calls = " INT64_FORMAT_W(14) ")", _max_compiled_calls_per_method);
2160 } else {
2161 tty->print_cr("Histogram generation failed for %s. n = %d, sum = %7.5f", name, n, sum);
2162 }
2163 }
2164
2165 void print_histogram() {
2166 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2167 print_histogram_helper(_max_arity, _arity_histogram, "arity");
2168 tty->print_cr("\nHistogram of parameter block size (in words, incl. rcvr):");
2169 print_histogram_helper(_max_size, _size_histogram, "size");
2170 tty->cr();
2171 }
2172
2173 public:
2174 MethodArityHistogram() {
2175 // Take the Compile_lock to protect against changes in the CodeBlob structures
2176 MutexLocker mu1(Compile_lock, Mutex::_safepoint_check_flag);
2177 // Take the CodeCache_lock to protect against changes in the CodeHeap structure
2178 MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2179 _max_arity = _max_size = 0;
2180 _total_compiled_calls = 0;
2181 _max_compiled_calls_per_method = 0;
2182 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2183 CodeCache::nmethods_do(add_method_to_histogram);
2184 print_histogram();
2185 }
2186 };
2187
2188 uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2189 uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2190 uint64_t MethodArityHistogram::_total_compiled_calls;
2191 uint64_t MethodArityHistogram::_max_compiled_calls_per_method;
2192 int MethodArityHistogram::_max_arity;
2193 int MethodArityHistogram::_max_size;
2194
2195 void SharedRuntime::print_call_statistics(uint64_t comp_total) {
2196 tty->print_cr("Calls from compiled code:");
2197 int64_t total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2198 int64_t mono_c = _nof_normal_calls - _nof_megamorphic_calls;
2199 int64_t mono_i = _nof_interface_calls;
2200 tty->print_cr("\t" INT64_FORMAT_W(12) " (100%%) total non-inlined ", total);
2201 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total));
2202 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2203 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_c, percent(mono_c, _nof_normal_calls));
2204 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2205 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total));
2206 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2207 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_i, percent(mono_i, _nof_interface_calls));
2208 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2209 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2210 tty->cr();
2211 tty->print_cr("Note 1: counter updates are not MT-safe.");
2212 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2213 tty->print_cr(" %% in nested categories are relative to their category");
2214 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2215 tty->cr();
2216
2217 MethodArityHistogram h;
2218 }
2219 #endif
2220
2221 #ifndef PRODUCT
2222 static int _lookups; // number of calls to lookup
2223 static int _equals; // number of buckets checked with matching hash
2224 static int _archived_hits; // number of successful lookups in archived table
2225 static int _runtime_hits; // number of successful lookups in runtime table
2226 #endif
2227
2228 // A simple wrapper class around the calling convention information
2229 // that allows sharing of adapters for the same calling convention.
2230 class AdapterFingerPrint : public MetaspaceObj {
2231 private:
2232 enum {
2233 _basic_type_bits = 4,
2234 _basic_type_mask = right_n_bits(_basic_type_bits),
2235 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2236 };
2237 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2238 // For now, 4 bits per components (plus T_VOID gaps after double/long) is not excessive.
2239
2240 int _length;
2241
2242 static int data_offset() { return sizeof(AdapterFingerPrint); }
2243 int* data_pointer() {
2244 return (int*)((address)this + data_offset());
2245 }
2246
2247 // Private construtor. Use allocate() to get an instance.
2248 AdapterFingerPrint(int total_args_passed, BasicType* sig_bt, int len) {
2249 int* data = data_pointer();
2250 // Pack the BasicTypes with 8 per int
2251 assert(len == length(total_args_passed), "sanity");
2252 _length = len;
2253 int sig_index = 0;
2254 for (int index = 0; index < _length; index++) {
2255 int value = 0;
2256 for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2257 int bt = adapter_encoding(sig_bt[sig_index++]);
2258 assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2259 value = (value << _basic_type_bits) | bt;
2260 }
2261 data[index] = value;
2262 }
2263 }
2264
2265 // Call deallocate instead
2266 ~AdapterFingerPrint() {
2267 ShouldNotCallThis();
2268 }
2269
2270 static int length(int total_args) {
2271 return (total_args + (_basic_types_per_int-1)) / _basic_types_per_int;
2272 }
2273
2274 static int compute_size_in_words(int len) {
2275 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(int)));
2276 }
2277
2278 // Remap BasicTypes that are handled equivalently by the adapters.
2279 // These are correct for the current system but someday it might be
2280 // necessary to make this mapping platform dependent.
2281 static int adapter_encoding(BasicType in) {
2282 switch (in) {
2283 case T_BOOLEAN:
2284 case T_BYTE:
2285 case T_SHORT:
2286 case T_CHAR:
2287 // There are all promoted to T_INT in the calling convention
2288 return T_INT;
2289
2290 case T_OBJECT:
2291 case T_ARRAY:
2292 // In other words, we assume that any register good enough for
2293 // an int or long is good enough for a managed pointer.
2294 #ifdef _LP64
2295 return T_LONG;
2296 #else
2297 return T_INT;
2298 #endif
2299
2300 case T_INT:
2301 case T_LONG:
2302 case T_FLOAT:
2303 case T_DOUBLE:
2304 case T_VOID:
2305 return in;
2306
2307 default:
2308 ShouldNotReachHere();
2309 return T_CONFLICT;
2310 }
2311 }
2312
2313 void* operator new(size_t size, size_t fp_size) throw() {
2314 assert(fp_size >= size, "sanity check");
2315 void* p = AllocateHeap(fp_size, mtCode);
2316 memset(p, 0, fp_size);
2317 return p;
2318 }
2319
2320 template<typename Function>
2321 void iterate_args(Function function) {
2322 for (int i = 0; i < length(); i++) {
2323 unsigned val = (unsigned)value(i);
2324 // args are packed so that first/lower arguments are in the highest
2325 // bits of each int value, so iterate from highest to the lowest
2326 for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2327 unsigned v = (val >> j) & _basic_type_mask;
2328 if (v == 0) {
2329 continue;
2330 }
2331 function(v);
2332 }
2333 }
2334 }
2335
2336 public:
2337 static AdapterFingerPrint* allocate(int total_args_passed, BasicType* sig_bt) {
2338 int len = length(total_args_passed);
2339 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2340 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(total_args_passed, sig_bt, len);
2341 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2342 return afp;
2343 }
2344
2345 static void deallocate(AdapterFingerPrint* fp) {
2346 FreeHeap(fp);
2347 }
2348
2349 int value(int index) {
2350 int* data = data_pointer();
2351 return data[index];
2352 }
2353
2354 int length() {
2355 return _length;
2356 }
2357
2358 unsigned int compute_hash() {
2359 int hash = 0;
2360 for (int i = 0; i < length(); i++) {
2361 int v = value(i);
2362 //Add arithmetic operation to the hash, like +3 to improve hashing
2363 hash = ((hash << 8) ^ v ^ (hash >> 5)) + 3;
2364 }
2365 return (unsigned int)hash;
2366 }
2367
2368 const char* as_string() {
2369 stringStream st;
2370 st.print("0x");
2371 for (int i = 0; i < length(); i++) {
2372 st.print("%x", value(i));
2373 }
2374 return st.as_string();
2375 }
2376
2377 const char* as_basic_args_string() {
2378 stringStream st;
2379 bool long_prev = false;
2380 iterate_args([&] (int arg) {
2381 if (long_prev) {
2382 long_prev = false;
2383 if (arg == T_VOID) {
2384 st.print("J");
2385 } else {
2386 st.print("L");
2387 }
2388 }
2389 switch (arg) {
2390 case T_INT: st.print("I"); break;
2391 case T_LONG: long_prev = true; break;
2392 case T_FLOAT: st.print("F"); break;
2393 case T_DOUBLE: st.print("D"); break;
2394 case T_VOID: break;
2395 default: ShouldNotReachHere();
2396 }
2397 });
2398 if (long_prev) {
2399 st.print("L");
2400 }
2401 return st.as_string();
2402 }
2403
2404 BasicType* as_basic_type(int& nargs) {
2405 nargs = 0;
2406 GrowableArray<BasicType> btarray;
2407 bool long_prev = false;
2408
2409 iterate_args([&] (int arg) {
2410 if (long_prev) {
2411 long_prev = false;
2412 if (arg == T_VOID) {
2413 btarray.append(T_LONG);
2414 } else {
2415 btarray.append(T_OBJECT); // it could be T_ARRAY; it shouldn't matter
2416 }
2417 }
2418 switch (arg) {
2419 case T_INT: // fallthrough
2420 case T_FLOAT: // fallthrough
2421 case T_DOUBLE:
2422 case T_VOID:
2423 btarray.append((BasicType)arg);
2424 break;
2425 case T_LONG:
2426 long_prev = true;
2427 break;
2428 default: ShouldNotReachHere();
2429 }
2430 });
2431
2432 if (long_prev) {
2433 btarray.append(T_OBJECT);
2434 }
2435
2436 nargs = btarray.length();
2437 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nargs);
2438 int index = 0;
2439 GrowableArrayIterator<BasicType> iter = btarray.begin();
2440 while (iter != btarray.end()) {
2441 sig_bt[index++] = *iter;
2442 ++iter;
2443 }
2444 assert(index == btarray.length(), "sanity check");
2445 #ifdef ASSERT
2446 {
2447 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(nargs, sig_bt);
2448 assert(this->equals(compare_fp), "sanity check");
2449 AdapterFingerPrint::deallocate(compare_fp);
2450 }
2451 #endif
2452 return sig_bt;
2453 }
2454
2455 bool equals(AdapterFingerPrint* other) {
2456 if (other->_length != _length) {
2457 return false;
2458 } else {
2459 for (int i = 0; i < _length; i++) {
2460 if (value(i) != other->value(i)) {
2461 return false;
2462 }
2463 }
2464 }
2465 return true;
2466 }
2467
2468 // methods required by virtue of being a MetaspaceObj
2469 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2470 int size() const { return compute_size_in_words(_length); }
2471 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2472
2473 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2474 NOT_PRODUCT(_equals++);
2475 return fp1->equals(fp2);
2476 }
2477
2478 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2479 return fp->compute_hash();
2480 }
2481 };
2482
2483 #if INCLUDE_CDS
2484 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2485 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2486 }
2487
2488 class ArchivedAdapterTable : public OffsetCompactHashtable<
2489 AdapterFingerPrint*,
2490 AdapterHandlerEntry*,
2491 adapter_fp_equals_compact_hashtable_entry> {};
2492 #endif // INCLUDE_CDS
2493
2494 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2495 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2496 AnyObj::C_HEAP, mtCode,
2497 AdapterFingerPrint::compute_hash,
2498 AdapterFingerPrint::equals>;
2499 static AdapterHandlerTable* _adapter_handler_table;
2500 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2501
2502 // Find a entry with the same fingerprint if it exists
2503 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(int total_args_passed, BasicType* sig_bt) {
2504 NOT_PRODUCT(_lookups++);
2505 assert_lock_strong(AdapterHandlerLibrary_lock);
2506 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2507 AdapterHandlerEntry* entry = nullptr;
2508 #if INCLUDE_CDS
2509 // if we are building the archive then the archived adapter table is
2510 // not valid and we need to use the ones added to the runtime table
2511 if (AOTCodeCache::is_using_adapter()) {
2512 // Search archived table first. It is read-only table so can be searched without lock
2513 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2514 #ifndef PRODUCT
2515 if (entry != nullptr) {
2516 _archived_hits++;
2517 }
2518 #endif
2519 }
2520 #endif // INCLUDE_CDS
2521 if (entry == nullptr) {
2522 assert_lock_strong(AdapterHandlerLibrary_lock);
2523 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2524 if (entry_p != nullptr) {
2525 entry = *entry_p;
2526 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2527 entry->fingerprint()->as_basic_args_string(), entry->fingerprint()->as_string(), entry->fingerprint()->compute_hash(),
2528 fp->as_basic_args_string(), fp->as_string(), fp->compute_hash());
2529 #ifndef PRODUCT
2530 _runtime_hits++;
2531 #endif
2532 }
2533 }
2534 AdapterFingerPrint::deallocate(fp);
2535 return entry;
2536 }
2537
2538 #ifndef PRODUCT
2539 static void print_table_statistics() {
2540 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2541 return sizeof(*key) + sizeof(*a);
2542 };
2543 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2544 ts.print(tty, "AdapterHandlerTable");
2545 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2546 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2547 int total_hits = _archived_hits + _runtime_hits;
2548 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2549 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2550 }
2551 #endif
2552
2553 // ---------------------------------------------------------------------------
2554 // Implementation of AdapterHandlerLibrary
2555 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2556 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2557 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2558 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2559 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2560 #if INCLUDE_CDS
2561 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2562 #endif // INCLUDE_CDS
2563 static const int AdapterHandlerLibrary_size = 16*K;
2564 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2565 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2566
2567 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2568 assert(_buffer != nullptr, "should be initialized");
2569 return _buffer;
2570 }
2571
2572 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2573 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2574 AdapterBlob* adapter_blob = entry->adapter_blob();
2575 char blob_id[256];
2576 jio_snprintf(blob_id,
2577 sizeof(blob_id),
2578 "%s(%s)",
2579 adapter_blob->name(),
2580 entry->fingerprint()->as_string());
2581 if (Forte::is_enabled()) {
2582 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2583 }
2584
2585 if (JvmtiExport::should_post_dynamic_code_generated()) {
2586 JvmtiExport::post_dynamic_code_generated(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2587 }
2588 }
2589 }
2590
2591 void AdapterHandlerLibrary::initialize() {
2592 {
2593 ResourceMark rm;
2594 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2595 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2596 }
2597
2598 #if INCLUDE_CDS
2599 // Link adapters in AOT Cache to their code in AOT Code Cache
2600 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2601 link_aot_adapters();
2602 lookup_simple_adapters();
2603 return;
2604 }
2605 #endif // INCLUDE_CDS
2606
2607 ResourceMark rm;
2608 {
2609 MutexLocker mu(AdapterHandlerLibrary_lock);
2610
2611 _no_arg_handler = create_adapter(0, nullptr);
2612
2613 BasicType obj_args[] = { T_OBJECT };
2614 _obj_arg_handler = create_adapter(1, obj_args);
2615
2616 BasicType int_args[] = { T_INT };
2617 _int_arg_handler = create_adapter(1, int_args);
2618
2619 BasicType obj_int_args[] = { T_OBJECT, T_INT };
2620 _obj_int_arg_handler = create_adapter(2, obj_int_args);
2621
2622 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2623 _obj_obj_arg_handler = create_adapter(2, obj_obj_args);
2624
2625 // we should always get an entry back but we don't have any
2626 // associated blob on Zero
2627 assert(_no_arg_handler != nullptr &&
2628 _obj_arg_handler != nullptr &&
2629 _int_arg_handler != nullptr &&
2630 _obj_int_arg_handler != nullptr &&
2631 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2632 }
2633
2634 // Outside of the lock
2635 #ifndef ZERO
2636 // no blobs to register when we are on Zero
2637 post_adapter_creation(_no_arg_handler);
2638 post_adapter_creation(_obj_arg_handler);
2639 post_adapter_creation(_int_arg_handler);
2640 post_adapter_creation(_obj_int_arg_handler);
2641 post_adapter_creation(_obj_obj_arg_handler);
2642 #endif // ZERO
2643 }
2644
2645 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2646 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2647 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2648 return AdapterHandlerEntry::allocate(id, fingerprint);
2649 }
2650
2651 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2652 int total_args_passed = method->size_of_parameters(); // All args on stack
2653 if (total_args_passed == 0) {
2654 return _no_arg_handler;
2655 } else if (total_args_passed == 1) {
2656 if (!method->is_static()) {
2657 return _obj_arg_handler;
2658 }
2659 switch (method->signature()->char_at(1)) {
2660 case JVM_SIGNATURE_CLASS:
2661 case JVM_SIGNATURE_ARRAY:
2662 return _obj_arg_handler;
2663 case JVM_SIGNATURE_INT:
2664 case JVM_SIGNATURE_BOOLEAN:
2665 case JVM_SIGNATURE_CHAR:
2666 case JVM_SIGNATURE_BYTE:
2667 case JVM_SIGNATURE_SHORT:
2668 return _int_arg_handler;
2669 }
2670 } else if (total_args_passed == 2 &&
2671 !method->is_static()) {
2672 switch (method->signature()->char_at(1)) {
2673 case JVM_SIGNATURE_CLASS:
2674 case JVM_SIGNATURE_ARRAY:
2675 return _obj_obj_arg_handler;
2676 case JVM_SIGNATURE_INT:
2677 case JVM_SIGNATURE_BOOLEAN:
2678 case JVM_SIGNATURE_CHAR:
2679 case JVM_SIGNATURE_BYTE:
2680 case JVM_SIGNATURE_SHORT:
2681 return _obj_int_arg_handler;
2682 }
2683 }
2684 return nullptr;
2685 }
2686
2687 class AdapterSignatureIterator : public SignatureIterator {
2688 private:
2689 BasicType stack_sig_bt[16];
2690 BasicType* sig_bt;
2691 int index;
2692
2693 public:
2694 AdapterSignatureIterator(Symbol* signature,
2695 fingerprint_t fingerprint,
2696 bool is_static,
2697 int total_args_passed) :
2698 SignatureIterator(signature, fingerprint),
2699 index(0)
2700 {
2701 sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2702 if (!is_static) { // Pass in receiver first
2703 sig_bt[index++] = T_OBJECT;
2704 }
2705 do_parameters_on(this);
2706 }
2707
2708 BasicType* basic_types() {
2709 return sig_bt;
2710 }
2711
2712 #ifdef ASSERT
2713 int slots() {
2714 return index;
2715 }
2716 #endif
2717
2718 private:
2719
2720 friend class SignatureIterator; // so do_parameters_on can call do_type
2721 void do_type(BasicType type) {
2722 sig_bt[index++] = type;
2723 if (type == T_LONG || type == T_DOUBLE) {
2724 sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2725 }
2726 }
2727 };
2728
2729
2730 const char* AdapterHandlerEntry::_entry_names[] = {
2731 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
2732 };
2733
2734 #ifdef ASSERT
2735 void AdapterHandlerLibrary::verify_adapter_sharing(int total_args_passed, BasicType* sig_bt, AdapterHandlerEntry* cached_entry) {
2736 // we can only check for the same code if there is any
2737 #ifndef ZERO
2738 AdapterHandlerEntry* comparison_entry = create_adapter(total_args_passed, sig_bt, true);
2739 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
2740 assert(comparison_entry->compare_code(cached_entry), "code must match");
2741 // Release the one just created
2742 AdapterHandlerEntry::deallocate(comparison_entry);
2743 # endif // ZERO
2744 }
2745 #endif /* ASSERT*/
2746
2747 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2748 assert(!method->is_abstract(), "abstract methods do not have adapters");
2749 // Use customized signature handler. Need to lock around updates to
2750 // the _adapter_handler_table (it is not safe for concurrent readers
2751 // and a single writer: this could be fixed if it becomes a
2752 // problem).
2753
2754 // Fast-path for trivial adapters
2755 AdapterHandlerEntry* entry = get_simple_adapter(method);
2756 if (entry != nullptr) {
2757 return entry;
2758 }
2759
2760 ResourceMark rm;
2761 bool new_entry = false;
2762
2763 // Fill in the signature array, for the calling-convention call.
2764 int total_args_passed = method->size_of_parameters(); // All args on stack
2765
2766 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2767 method->is_static(), total_args_passed);
2768 assert(si.slots() == total_args_passed, "");
2769 BasicType* sig_bt = si.basic_types();
2770 {
2771 MutexLocker mu(AdapterHandlerLibrary_lock);
2772
2773 // Lookup method signature's fingerprint
2774 entry = lookup(total_args_passed, sig_bt);
2775
2776 if (entry != nullptr) {
2777 #ifndef ZERO
2778 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
2779 #endif
2780 #ifdef ASSERT
2781 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
2782 verify_adapter_sharing(total_args_passed, sig_bt, entry);
2783 }
2784 #endif
2785 } else {
2786 entry = create_adapter(total_args_passed, sig_bt);
2787 if (entry != nullptr) {
2788 new_entry = true;
2789 }
2790 }
2791 }
2792
2793 // Outside of the lock
2794 if (new_entry) {
2795 post_adapter_creation(entry);
2796 }
2797 return entry;
2798 }
2799
2800 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
2801 ResourceMark rm;
2802 const char* name = AdapterHandlerLibrary::name(handler);
2803 const uint32_t id = AdapterHandlerLibrary::id(handler);
2804
2805 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
2806 if (blob != nullptr) {
2807 handler->set_adapter_blob(blob->as_adapter_blob());
2808 }
2809 }
2810
2811 #ifndef PRODUCT
2812 void AdapterHandlerLibrary::print_adapter_handler_info(outputStream* st, AdapterHandlerEntry* handler) {
2813 ttyLocker ttyl;
2814 ResourceMark rm;
2815 int insts_size;
2816 // on Zero the blob may be null
2817 handler->print_adapter_on(tty);
2818 AdapterBlob* adapter_blob = handler->adapter_blob();
2819 if (adapter_blob == nullptr) {
2820 return;
2821 }
2822 insts_size = adapter_blob->code_size();
2823 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
2824 handler->fingerprint()->as_basic_args_string(),
2825 handler->fingerprint()->as_string(), insts_size);
2826 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
2827 if (Verbose || PrintStubCode) {
2828 address first_pc = adapter_blob->content_begin();
2829 if (first_pc != nullptr) {
2830 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
2831 st->cr();
2832 }
2833 }
2834 }
2835 #endif // PRODUCT
2836
2837 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
2838 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
2839 entry_offset[AdapterBlob::I2C] = 0;
2840 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
2841 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
2842 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
2843 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
2844 } else {
2845 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
2846 }
2847 }
2848
2849 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
2850 int total_args_passed,
2851 BasicType* sig_bt,
2852 bool is_transient) {
2853 if (log_is_enabled(Info, perf, class, link)) {
2854 ClassLoader::perf_method_adapters_count()->inc();
2855 }
2856
2857 #ifndef ZERO
2858 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2859 CodeBuffer buffer(buf);
2860 short buffer_locs[20];
2861 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2862 sizeof(buffer_locs)/sizeof(relocInfo));
2863 MacroAssembler masm(&buffer);
2864 VMRegPair stack_regs[16];
2865 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2866
2867 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2868 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2869 address entry_address[AdapterBlob::ENTRY_COUNT];
2870 SharedRuntime::generate_i2c2i_adapters(&masm,
2871 total_args_passed,
2872 comp_args_on_stack,
2873 sig_bt,
2874 regs,
2875 entry_address);
2876 // On zero there is no code to save and no need to create a blob and
2877 // or relocate the handler.
2878 int entry_offset[AdapterBlob::ENTRY_COUNT];
2879 address_to_offset(entry_address, entry_offset);
2880 #ifdef ASSERT
2881 if (VerifyAdapterSharing) {
2882 handler->save_code(buf->code_begin(), buffer.insts_size());
2883 if (is_transient) {
2884 return true;
2885 }
2886 }
2887 #endif
2888 AdapterBlob* adapter_blob = AdapterBlob::create(&buffer, entry_offset);
2889 if (adapter_blob == nullptr) {
2890 // CodeCache is full, disable compilation
2891 // Ought to log this but compile log is only per compile thread
2892 // and we're some non descript Java thread.
2893 return false;
2894 }
2895 handler->set_adapter_blob(adapter_blob);
2896 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
2897 // try to save generated code
2898 const char* name = AdapterHandlerLibrary::name(handler);
2899 const uint32_t id = AdapterHandlerLibrary::id(handler);
2900 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
2901 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
2902 }
2903 #endif // ZERO
2904
2905 #ifndef PRODUCT
2906 // debugging support
2907 if (PrintAdapterHandlers || PrintStubCode) {
2908 print_adapter_handler_info(tty, handler);
2909 }
2910 #endif
2911
2912 return true;
2913 }
2914
2915 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(int total_args_passed,
2916 BasicType* sig_bt,
2917 bool is_transient) {
2918 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2919 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
2920 if (!generate_adapter_code(handler, total_args_passed, sig_bt, is_transient)) {
2921 AdapterHandlerEntry::deallocate(handler);
2922 return nullptr;
2923 }
2924 if (!is_transient) {
2925 assert_lock_strong(AdapterHandlerLibrary_lock);
2926 _adapter_handler_table->put(fp, handler);
2927 }
2928 return handler;
2929 }
2930
2931 #if INCLUDE_CDS
2932 void AdapterHandlerEntry::remove_unshareable_info() {
2933 #ifdef ASSERT
2934 _saved_code = nullptr;
2935 _saved_code_length = 0;
2936 #endif // ASSERT
2937 _adapter_blob = nullptr;
2938 _linked = false;
2939 }
2940
2941 class CopyAdapterTableToArchive : StackObj {
2942 private:
2943 CompactHashtableWriter* _writer;
2944 ArchiveBuilder* _builder;
2945 public:
2946 CopyAdapterTableToArchive(CompactHashtableWriter* writer) : _writer(writer),
2947 _builder(ArchiveBuilder::current())
2948 {}
2949
2950 bool do_entry(AdapterFingerPrint* fp, AdapterHandlerEntry* entry) {
2951 LogStreamHandle(Trace, aot) lsh;
2952 if (ArchiveBuilder::current()->has_been_archived((address)entry)) {
2953 assert(ArchiveBuilder::current()->has_been_archived((address)fp), "must be");
2954 AdapterFingerPrint* buffered_fp = ArchiveBuilder::current()->get_buffered_addr(fp);
2955 assert(buffered_fp != nullptr,"sanity check");
2956 AdapterHandlerEntry* buffered_entry = ArchiveBuilder::current()->get_buffered_addr(entry);
2957 assert(buffered_entry != nullptr,"sanity check");
2958
2959 uint hash = fp->compute_hash();
2960 u4 delta = _builder->buffer_to_offset_u4((address)buffered_entry);
2961 _writer->add(hash, delta);
2962 if (lsh.is_enabled()) {
2963 address fp_runtime_addr = (address)buffered_fp + ArchiveBuilder::current()->buffer_to_requested_delta();
2964 address entry_runtime_addr = (address)buffered_entry + ArchiveBuilder::current()->buffer_to_requested_delta();
2965 log_trace(aot)("Added fp=%p (%s), entry=%p to the archived adater table", buffered_fp, buffered_fp->as_basic_args_string(), buffered_entry);
2966 }
2967 } else {
2968 if (lsh.is_enabled()) {
2969 log_trace(aot)("Skipping adapter handler %p (fp=%s) as it is not archived", entry, fp->as_basic_args_string());
2970 }
2971 }
2972 return true;
2973 }
2974 };
2975
2976 void AdapterHandlerLibrary::dump_aot_adapter_table() {
2977 CompactHashtableStats stats;
2978 CompactHashtableWriter writer(_adapter_handler_table->number_of_entries(), &stats);
2979 CopyAdapterTableToArchive copy(&writer);
2980 _adapter_handler_table->iterate(©);
2981 writer.dump(&_aot_adapter_handler_table, "archived adapter table");
2982 }
2983
2984 void AdapterHandlerLibrary::serialize_shared_table_header(SerializeClosure* soc) {
2985 _aot_adapter_handler_table.serialize_header(soc);
2986 }
2987
2988 void AdapterHandlerLibrary::link_aot_adapter_handler(AdapterHandlerEntry* handler) {
2989 #ifdef ASSERT
2990 if (TestAOTAdapterLinkFailure) {
2991 return;
2992 }
2993 #endif
2994 lookup_aot_cache(handler);
2995 #ifndef PRODUCT
2996 // debugging support
2997 if (PrintAdapterHandlers || PrintStubCode) {
2998 print_adapter_handler_info(tty, handler);
2999 }
3000 #endif
3001 }
3002
3003 // This method is used during production run to link archived adapters (stored in AOT Cache)
3004 // to their code in AOT Code Cache
3005 void AdapterHandlerEntry::link() {
3006 ResourceMark rm;
3007 assert(_fingerprint != nullptr, "_fingerprint must not be null");
3008 bool generate_code = false;
3009 // Generate code only if AOTCodeCache is not available, or
3010 // caching adapters is disabled, or we fail to link
3011 // the AdapterHandlerEntry to its code in the AOTCodeCache
3012 if (AOTCodeCache::is_using_adapter()) {
3013 AdapterHandlerLibrary::link_aot_adapter_handler(this);
3014 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3015 if (_adapter_blob == nullptr) {
3016 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3017 generate_code = true;
3018 }
3019 } else {
3020 generate_code = true;
3021 }
3022 if (generate_code) {
3023 int nargs;
3024 BasicType* bt = _fingerprint->as_basic_type(nargs);
3025 if (!AdapterHandlerLibrary::generate_adapter_code(this, nargs, bt, /* is_transient */ false)) {
3026 // Don't throw exceptions during VM initialization because java.lang.* classes
3027 // might not have been initialized, causing problems when constructing the
3028 // Java exception object.
3029 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3030 }
3031 }
3032 if (_adapter_blob != nullptr) {
3033 post_adapter_creation(this);
3034 }
3035 assert(_linked, "AdapterHandlerEntry must now be linked");
3036 }
3037
3038 void AdapterHandlerLibrary::link_aot_adapters() {
3039 uint max_id = 0;
3040 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3041 /* It is possible that some adapters generated in assembly phase are not stored in the cache.
3042 * That implies adapter ids of the adapters in the cache may not be contiguous.
3043 * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
3044 * result in collision of adapter ids between AOT stored handlers and runtime generated handlers.
3045 * To avoid such situation, initialize the _id_counter with the largest adapter id among the AOT stored handlers.
3046 */
3047 _aot_adapter_handler_table.iterate([&](AdapterHandlerEntry* entry) {
3048 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3049 entry->link();
3050 max_id = MAX2(max_id, entry->id());
3051 });
3052 // Set adapter id to the maximum id found in the AOTCache
3053 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3054 _id_counter = max_id;
3055 }
3056
3057 // This method is called during production run to lookup simple adapters
3058 // in the archived adapter handler table
3059 void AdapterHandlerLibrary::lookup_simple_adapters() {
3060 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3061
3062 MutexLocker mu(AdapterHandlerLibrary_lock);
3063 _no_arg_handler = lookup(0, nullptr);
3064
3065 BasicType obj_args[] = { T_OBJECT };
3066 _obj_arg_handler = lookup(1, obj_args);
3067
3068 BasicType int_args[] = { T_INT };
3069 _int_arg_handler = lookup(1, int_args);
3070
3071 BasicType obj_int_args[] = { T_OBJECT, T_INT };
3072 _obj_int_arg_handler = lookup(2, obj_int_args);
3073
3074 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
3075 _obj_obj_arg_handler = lookup(2, obj_obj_args);
3076
3077 assert(_no_arg_handler != nullptr &&
3078 _obj_arg_handler != nullptr &&
3079 _int_arg_handler != nullptr &&
3080 _obj_int_arg_handler != nullptr &&
3081 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3082 assert(_no_arg_handler->is_linked() &&
3083 _obj_arg_handler->is_linked() &&
3084 _int_arg_handler->is_linked() &&
3085 _obj_int_arg_handler->is_linked() &&
3086 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3087 }
3088 #endif // INCLUDE_CDS
3089
3090 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3091 LogStreamHandle(Trace, aot) lsh;
3092 if (lsh.is_enabled()) {
3093 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3094 lsh.cr();
3095 }
3096 it->push(&_fingerprint);
3097 }
3098
3099 AdapterHandlerEntry::~AdapterHandlerEntry() {
3100 if (_fingerprint != nullptr) {
3101 AdapterFingerPrint::deallocate(_fingerprint);
3102 _fingerprint = nullptr;
3103 }
3104 #ifdef ASSERT
3105 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3106 #endif
3107 FreeHeap(this);
3108 }
3109
3110
3111 #ifdef ASSERT
3112 // Capture the code before relocation so that it can be compared
3113 // against other versions. If the code is captured after relocation
3114 // then relative instructions won't be equivalent.
3115 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3116 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3117 _saved_code_length = length;
3118 memcpy(_saved_code, buffer, length);
3119 }
3120
3121
3122 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3123 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3124
3125 if (other->_saved_code_length != _saved_code_length) {
3126 return false;
3127 }
3128
3129 return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
3130 }
3131 #endif
3132
3133
3134 /**
3135 * Create a native wrapper for this native method. The wrapper converts the
3136 * Java-compiled calling convention to the native convention, handles
3137 * arguments, and transitions to native. On return from the native we transition
3138 * back to java blocking if a safepoint is in progress.
3139 */
3140 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
3141 ResourceMark rm;
3142 nmethod* nm = nullptr;
3143
3144 // Check if memory should be freed before allocation
3145 CodeCache::gc_on_allocation();
3146
3147 assert(method->is_native(), "must be native");
3148 assert(method->is_special_native_intrinsic() ||
3149 method->has_native_function(), "must have something valid to call!");
3150
3151 {
3152 // Perform the work while holding the lock, but perform any printing outside the lock
3153 MutexLocker mu(AdapterHandlerLibrary_lock);
3154 // See if somebody beat us to it
3155 if (method->code() != nullptr) {
3156 return;
3157 }
3158
3159 const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
3160 assert(compile_id > 0, "Must generate native wrapper");
3161
3162
3163 ResourceMark rm;
3164 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3165 if (buf != nullptr) {
3166 CodeBuffer buffer(buf);
3167
3168 if (method->is_continuation_enter_intrinsic()) {
3169 buffer.initialize_stubs_size(192);
3170 }
3171
3172 struct { double data[20]; } locs_buf;
3173 struct { double data[20]; } stubs_locs_buf;
3174 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3175 #if defined(AARCH64) || defined(PPC64)
3176 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3177 // in the constant pool to ensure ordering between the barrier and oops
3178 // accesses. For native_wrappers we need a constant.
3179 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3180 // static java call that is resolved in the runtime.
3181 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3182 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3183 }
3184 #endif
3185 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3186 MacroAssembler _masm(&buffer);
3187
3188 // Fill in the signature array, for the calling-convention call.
3189 const int total_args_passed = method->size_of_parameters();
3190
3191 VMRegPair stack_regs[16];
3192 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3193
3194 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3195 method->is_static(), total_args_passed);
3196 BasicType* sig_bt = si.basic_types();
3197 assert(si.slots() == total_args_passed, "");
3198 BasicType ret_type = si.return_type();
3199
3200 // Now get the compiled-Java arguments layout.
3201 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3202
3203 // Generate the compiled-to-native wrapper code
3204 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3205
3206 if (nm != nullptr) {
3207 {
3208 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3209 if (nm->make_in_use()) {
3210 method->set_code(method, nm);
3211 }
3212 }
3213
3214 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3215 if (directive->PrintAssemblyOption) {
3216 nm->print_code();
3217 }
3218 DirectivesStack::release(directive);
3219 }
3220 }
3221 } // Unlock AdapterHandlerLibrary_lock
3222
3223
3224 // Install the generated code.
3225 if (nm != nullptr) {
3226 const char *msg = method->is_static() ? "(static)" : "";
3227 CompileTask::print_ul(nm, msg);
3228 if (PrintCompilation) {
3229 ttyLocker ttyl;
3230 CompileTask::print(tty, nm, msg);
3231 }
3232 nm->post_compiled_method_load_event();
3233 }
3234 }
3235
3236 // -------------------------------------------------------------------------
3237 // Java-Java calling convention
3238 // (what you use when Java calls Java)
3239
3240 //------------------------------name_for_receiver----------------------------------
3241 // For a given signature, return the VMReg for parameter 0.
3242 VMReg SharedRuntime::name_for_receiver() {
3243 VMRegPair regs;
3244 BasicType sig_bt = T_OBJECT;
3245 (void) java_calling_convention(&sig_bt, ®s, 1);
3246 // Return argument 0 register. In the LP64 build pointers
3247 // take 2 registers, but the VM wants only the 'main' name.
3248 return regs.first();
3249 }
3250
3251 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
3252 // This method is returning a data structure allocating as a
3253 // ResourceObject, so do not put any ResourceMarks in here.
3254
3255 BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
3256 VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
3257 int cnt = 0;
3258 if (has_receiver) {
3259 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
3260 }
3261
3262 for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) {
3263 BasicType type = ss.type();
3264 sig_bt[cnt++] = type;
3265 if (is_double_word_type(type))
3266 sig_bt[cnt++] = T_VOID;
3267 }
3268
3269 if (has_appendix) {
3270 sig_bt[cnt++] = T_OBJECT;
3271 }
3272
3273 assert(cnt < 256, "grow table size");
3274
3275 int comp_args_on_stack;
3276 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt);
3277
3278 // the calling convention doesn't count out_preserve_stack_slots so
3279 // we must add that in to get "true" stack offsets.
3280
3281 if (comp_args_on_stack) {
3282 for (int i = 0; i < cnt; i++) {
3283 VMReg reg1 = regs[i].first();
3284 if (reg1->is_stack()) {
3285 // Yuck
3286 reg1 = reg1->bias(out_preserve_stack_slots());
3287 }
3288 VMReg reg2 = regs[i].second();
3289 if (reg2->is_stack()) {
3290 // Yuck
3291 reg2 = reg2->bias(out_preserve_stack_slots());
3292 }
3293 regs[i].set_pair(reg2, reg1);
3294 }
3295 }
3296
3297 // results
3298 *arg_size = cnt;
3299 return regs;
3300 }
3301
3302 // OSR Migration Code
3303 //
3304 // This code is used convert interpreter frames into compiled frames. It is
3305 // called from very start of a compiled OSR nmethod. A temp array is
3306 // allocated to hold the interesting bits of the interpreter frame. All
3307 // active locks are inflated to allow them to move. The displaced headers and
3308 // active interpreter locals are copied into the temp buffer. Then we return
3309 // back to the compiled code. The compiled code then pops the current
3310 // interpreter frame off the stack and pushes a new compiled frame. Then it
3311 // copies the interpreter locals and displaced headers where it wants.
3312 // Finally it calls back to free the temp buffer.
3313 //
3314 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
3315
3316 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
3317 assert(current == JavaThread::current(), "pre-condition");
3318 JFR_ONLY(Jfr::check_and_process_sample_request(current);)
3319 // During OSR migration, we unwind the interpreted frame and replace it with a compiled
3320 // frame. The stack watermark code below ensures that the interpreted frame is processed
3321 // before it gets unwound. This is helpful as the size of the compiled frame could be
3322 // larger than the interpreted frame, which could result in the new frame not being
3323 // processed correctly.
3324 StackWatermarkSet::before_unwind(current);
3325
3326 //
3327 // This code is dependent on the memory layout of the interpreter local
3328 // array and the monitors. On all of our platforms the layout is identical
3329 // so this code is shared. If some platform lays the their arrays out
3330 // differently then this code could move to platform specific code or
3331 // the code here could be modified to copy items one at a time using
3332 // frame accessor methods and be platform independent.
3333
3334 frame fr = current->last_frame();
3335 assert(fr.is_interpreted_frame(), "");
3336 assert(fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks");
3337
3338 // Figure out how many monitors are active.
3339 int active_monitor_count = 0;
3340 for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
3341 kptr < fr.interpreter_frame_monitor_begin();
3342 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
3343 if (kptr->obj() != nullptr) active_monitor_count++;
3344 }
3345
3346 // QQQ we could place number of active monitors in the array so that compiled code
3347 // could double check it.
3348
3349 Method* moop = fr.interpreter_frame_method();
3350 int max_locals = moop->max_locals();
3351 // Allocate temp buffer, 1 word per local & 2 per active monitor
3352 int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size();
3353 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);
3354
3355 // Copy the locals. Order is preserved so that loading of longs works.
3356 // Since there's no GC I can copy the oops blindly.
3357 assert(sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
3358 Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
3359 (HeapWord*)&buf[0],
3360 max_locals);
3361
3362 // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
3363 int i = max_locals;
3364 for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3365 kptr2 < fr.interpreter_frame_monitor_begin();
3366 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3367 if (kptr2->obj() != nullptr) { // Avoid 'holes' in the monitor array
3368 BasicLock *lock = kptr2->lock();
3369 if (UseObjectMonitorTable) {
3370 buf[i] = (intptr_t)lock->object_monitor_cache();
3371 }
3372 #ifdef ASSERT
3373 else {
3374 buf[i] = badDispHeaderOSR;
3375 }
3376 #endif
3377 i++;
3378 buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3379 }
3380 }
3381 assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3382
3383 RegisterMap map(current,
3384 RegisterMap::UpdateMap::skip,
3385 RegisterMap::ProcessFrames::include,
3386 RegisterMap::WalkContinuation::skip);
3387 frame sender = fr.sender(&map);
3388 if (sender.is_interpreted_frame()) {
3389 current->push_cont_fastpath(sender.sp());
3390 }
3391
3392 return buf;
3393 JRT_END
3394
3395 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3396 FREE_C_HEAP_ARRAY(intptr_t, buf);
3397 JRT_END
3398
3399 bool AdapterHandlerLibrary::contains(const CodeBlob* b) {
3400 bool found = false;
3401 #if INCLUDE_CDS
3402 if (AOTCodeCache::is_using_adapter()) {
3403 auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3404 return (found = (b == CodeCache::find_blob(handler->get_i2c_entry())));
3405 };
3406 _aot_adapter_handler_table.iterate(findblob_archived_table);
3407 }
3408 #endif // INCLUDE_CDS
3409 if (!found) {
3410 auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
3411 return (found = (b == CodeCache::find_blob(a->get_i2c_entry())));
3412 };
3413 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3414 _adapter_handler_table->iterate(findblob_runtime_table);
3415 }
3416 return found;
3417 }
3418
3419 const char* AdapterHandlerLibrary::name(AdapterHandlerEntry* handler) {
3420 return handler->fingerprint()->as_basic_args_string();
3421 }
3422
3423 uint32_t AdapterHandlerLibrary::id(AdapterHandlerEntry* handler) {
3424 return handler->id();
3425 }
3426
3427 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3428 bool found = false;
3429 #if INCLUDE_CDS
3430 if (AOTCodeCache::is_using_adapter()) {
3431 auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3432 if (b == CodeCache::find_blob(handler->get_i2c_entry())) {
3433 found = true;
3434 st->print("Adapter for signature: ");
3435 handler->print_adapter_on(st);
3436 return true;
3437 } else {
3438 return false; // keep looking
3439 }
3440 };
3441 _aot_adapter_handler_table.iterate(findblob_archived_table);
3442 }
3443 #endif // INCLUDE_CDS
3444 if (!found) {
3445 auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
3446 if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3447 found = true;
3448 st->print("Adapter for signature: ");
3449 a->print_adapter_on(st);
3450 return true;
3451 } else {
3452 return false; // keep looking
3453 }
3454 };
3455 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3456 _adapter_handler_table->iterate(findblob_runtime_table);
3457 }
3458 assert(found, "Should have found handler");
3459 }
3460
3461 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3462 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3463 if (adapter_blob() != nullptr) {
3464 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3465 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3466 st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3467 if (get_c2i_no_clinit_check_entry() != nullptr) {
3468 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3469 }
3470 }
3471 st->cr();
3472 }
3473
3474 #ifndef PRODUCT
3475
3476 void AdapterHandlerLibrary::print_statistics() {
3477 print_table_statistics();
3478 }
3479
3480 #endif /* PRODUCT */
3481
3482 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3483 assert(current == JavaThread::current(), "pre-condition");
3484 StackOverflow* overflow_state = current->stack_overflow_state();
3485 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3486 overflow_state->set_reserved_stack_activation(current->stack_base());
3487 JRT_END
3488
3489 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
3490 ResourceMark rm(current);
3491 frame activation;
3492 nmethod* nm = nullptr;
3493 int count = 1;
3494
3495 assert(fr.is_java_frame(), "Must start on Java frame");
3496
3497 RegisterMap map(JavaThread::current(),
3498 RegisterMap::UpdateMap::skip,
3499 RegisterMap::ProcessFrames::skip,
3500 RegisterMap::WalkContinuation::skip); // don't walk continuations
3501 for (; !fr.is_first_frame(); fr = fr.sender(&map)) {
3502 if (!fr.is_java_frame()) {
3503 continue;
3504 }
3505
3506 Method* method = nullptr;
3507 bool found = false;
3508 if (fr.is_interpreted_frame()) {
3509 method = fr.interpreter_frame_method();
3510 if (method != nullptr && method->has_reserved_stack_access()) {
3511 found = true;
3512 }
3513 } else {
3514 CodeBlob* cb = fr.cb();
3515 if (cb != nullptr && cb->is_nmethod()) {
3516 nm = cb->as_nmethod();
3517 method = nm->method();
3518 for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != nullptr; sd = sd->sender()) {
3519 method = sd->method();
3520 if (method != nullptr && method->has_reserved_stack_access()) {
3521 found = true;
3522 }
3523 }
3524 }
3525 }
3526 if (found) {
3527 activation = fr;
3528 warning("Potentially dangerous stack overflow in "
3529 "ReservedStackAccess annotated method %s [%d]",
3530 method->name_and_sig_as_C_string(), count++);
3531 EventReservedStackActivation event;
3532 if (event.should_commit()) {
3533 event.set_method(method);
3534 event.commit();
3535 }
3536 }
3537 }
3538 return activation;
3539 }
3540
3541 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3542 // After any safepoint, just before going back to compiled code,
3543 // we inform the GC that we will be doing initializing writes to
3544 // this object in the future without emitting card-marks, so
3545 // GC may take any compensating steps.
3546
3547 oop new_obj = current->vm_result_oop();
3548 if (new_obj == nullptr) return;
3549
3550 BarrierSet *bs = BarrierSet::barrier_set();
3551 bs->on_slowpath_allocation_exit(current, new_obj);
3552 }