1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/archiveBuilder.hpp"
26 #include "cds/archiveUtils.inline.hpp"
27 #include "classfile/classLoader.hpp"
28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/stringTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/aotCodeCache.hpp"
33 #include "code/codeCache.hpp"
34 #include "code/compiledIC.hpp"
35 #include "code/nmethod.inline.hpp"
36 #include "code/scopeDesc.hpp"
37 #include "code/vtableStubs.hpp"
38 #include "compiler/abstractCompiler.hpp"
39 #include "compiler/compileBroker.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/interpreterRuntime.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "jvm.h"
47 #include "logging/log.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "metaprogramming/primitiveConversions.hpp"
51 #include "oops/klass.hpp"
52 #include "oops/method.inline.hpp"
53 #include "oops/objArrayKlass.hpp"
54 #include "oops/oop.inline.hpp"
55 #include "prims/forte.hpp"
56 #include "prims/jvmtiExport.hpp"
57 #include "prims/jvmtiThreadState.hpp"
58 #include "prims/methodHandles.hpp"
59 #include "prims/nativeLookup.hpp"
60 #include "runtime/arguments.hpp"
61 #include "runtime/atomicAccess.hpp"
62 #include "runtime/basicLock.inline.hpp"
63 #include "runtime/frame.inline.hpp"
64 #include "runtime/handles.inline.hpp"
65 #include "runtime/init.hpp"
66 #include "runtime/interfaceSupport.inline.hpp"
67 #include "runtime/java.hpp"
68 #include "runtime/javaCalls.hpp"
69 #include "runtime/jniHandles.inline.hpp"
70 #include "runtime/osThread.hpp"
71 #include "runtime/perfData.hpp"
72 #include "runtime/sharedRuntime.hpp"
73 #include "runtime/stackWatermarkSet.hpp"
74 #include "runtime/stubRoutines.hpp"
75 #include "runtime/synchronizer.hpp"
76 #include "runtime/timerTrace.hpp"
77 #include "runtime/vframe.inline.hpp"
78 #include "runtime/vframeArray.hpp"
79 #include "runtime/vm_version.hpp"
80 #include "utilities/copy.hpp"
81 #include "utilities/dtrace.hpp"
82 #include "utilities/events.hpp"
83 #include "utilities/globalDefinitions.hpp"
84 #include "utilities/hashTable.hpp"
85 #include "utilities/macros.hpp"
86 #include "utilities/xmlstream.hpp"
87 #ifdef COMPILER1
88 #include "c1/c1_Runtime1.hpp"
89 #endif
90 #ifdef COMPILER2
91 #include "opto/runtime.hpp"
92 #endif
93 #if INCLUDE_JFR
94 #include "jfr/jfr.inline.hpp"
95 #endif
96
97 // Shared runtime stub routines reside in their own unique blob with a
98 // single entry point
99
100
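// Each (name, type) pair in SHARED_STUBS_DO expands (via BLOB_FIELD_NAME) to the
// definition of the corresponding static blob field below.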
101 #define SHARED_STUB_FIELD_DEFINE(name, type) \
102 type* SharedRuntime::BLOB_FIELD_NAME(name);
103 SHARED_STUBS_DO(SHARED_STUB_FIELD_DEFINE)
104 #undef SHARED_STUB_FIELD_DEFINE
105
106 nmethod* SharedRuntime::_cont_doYield_stub;
107
108 #if 0
109 // TODO tweak global stub name generation to match this
110 #define SHARED_STUB_NAME_DECLARE(name, type) "Shared Runtime " # name "_blob",
111 const char *SharedRuntime::_stub_names[] = {
112 SHARED_STUBS_DO(SHARED_STUB_NAME_DECLARE)
113 };
114 #endif
115
116 //----------------------------generate_stubs-----------------------------------
117 void SharedRuntime::generate_initial_stubs() {
118 // Build this early so it's available for the interpreter.
119 _throw_StackOverflowError_blob =
120 generate_throw_exception(StubId::shared_throw_StackOverflowError_id,
121 CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
122 }
123
124 void SharedRuntime::generate_stubs() {
125 _wrong_method_blob =
126 generate_resolve_blob(StubId::shared_wrong_method_id,
127 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method));
128 _wrong_method_abstract_blob =
129 generate_resolve_blob(StubId::shared_wrong_method_abstract_id,
130 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract));
131 _ic_miss_blob =
132 generate_resolve_blob(StubId::shared_ic_miss_id,
133 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss));
134 _resolve_opt_virtual_call_blob =
135 generate_resolve_blob(StubId::shared_resolve_opt_virtual_call_id,
136 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C));
137 _resolve_virtual_call_blob =
138 generate_resolve_blob(StubId::shared_resolve_virtual_call_id,
139 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C));
140 _resolve_static_call_blob =
141 generate_resolve_blob(StubId::shared_resolve_static_call_id,
142 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C));
143
144 _throw_delayed_StackOverflowError_blob =
145 generate_throw_exception(StubId::shared_throw_delayed_StackOverflowError_id,
146 CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
147
148 _throw_AbstractMethodError_blob =
149 generate_throw_exception(StubId::shared_throw_AbstractMethodError_id,
150 CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
151
152 _throw_IncompatibleClassChangeError_blob =
153 generate_throw_exception(StubId::shared_throw_IncompatibleClassChangeError_id,
154 CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
155
156 _throw_NullPointerException_at_call_blob =
157 generate_throw_exception(StubId::shared_throw_NullPointerException_at_call_id,
158 CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
159
160 #if COMPILER2_OR_JVMCI
161 // Vectors are generated only by C2 and JVMCI.
162 bool support_wide = is_wide_vector(MaxVectorSize);
163 if (support_wide) {
164 _polling_page_vectors_safepoint_handler_blob =
165 generate_handler_blob(StubId::shared_polling_page_vectors_safepoint_handler_id,
166 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
167 }
168 #endif // COMPILER2_OR_JVMCI
169 _polling_page_safepoint_handler_blob =
170 generate_handler_blob(StubId::shared_polling_page_safepoint_handler_id,
171 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
172 _polling_page_return_handler_blob =
173 generate_handler_blob(StubId::shared_polling_page_return_handler_id,
174 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
175
176 generate_deopt_blob();
177 }
178
179 void SharedRuntime::init_adapter_library() {
180 AdapterHandlerLibrary::initialize();
181 }
182
183 #if INCLUDE_JFR
184 //------------------------------generate jfr runtime stubs ------
185 void SharedRuntime::generate_jfr_stubs() {
186 ResourceMark rm;
187 const char* timer_msg = "SharedRuntime generate_jfr_stubs";
188 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
189
190 _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint();
191 _jfr_return_lease_blob = generate_jfr_return_lease();
192 }
193
194 #endif // INCLUDE_JFR
195
196 #include <math.h>
197
198 // Implementation of SharedRuntime
199
200 #ifndef PRODUCT
201 // For statistics
202 uint SharedRuntime::_ic_miss_ctr = 0;
203 uint SharedRuntime::_wrong_method_ctr = 0;
204 uint SharedRuntime::_resolve_static_ctr = 0;
205 uint SharedRuntime::_resolve_virtual_ctr = 0;
206 uint SharedRuntime::_resolve_opt_virtual_ctr = 0;
207 uint SharedRuntime::_implicit_null_throws = 0;
208 uint SharedRuntime::_implicit_div0_throws = 0;
209
210 int64_t SharedRuntime::_nof_normal_calls = 0;
211 int64_t SharedRuntime::_nof_inlined_calls = 0;
212 int64_t SharedRuntime::_nof_megamorphic_calls = 0;
213 int64_t SharedRuntime::_nof_static_calls = 0;
214 int64_t SharedRuntime::_nof_inlined_static_calls = 0;
215 int64_t SharedRuntime::_nof_interface_calls = 0;
216 int64_t SharedRuntime::_nof_inlined_interface_calls = 0;
217
218 uint SharedRuntime::_new_instance_ctr=0;
219 uint SharedRuntime::_new_array_ctr=0;
220 uint SharedRuntime::_multi2_ctr=0;
221 uint SharedRuntime::_multi3_ctr=0;
222 uint SharedRuntime::_multi4_ctr=0;
223 uint SharedRuntime::_multi5_ctr=0;
224 uint SharedRuntime::_mon_enter_stub_ctr=0;
225 uint SharedRuntime::_mon_exit_stub_ctr=0;
226 uint SharedRuntime::_mon_enter_ctr=0;
227 uint SharedRuntime::_mon_exit_ctr=0;
228 uint SharedRuntime::_partial_subtype_ctr=0;
229 uint SharedRuntime::_jbyte_array_copy_ctr=0;
230 uint SharedRuntime::_jshort_array_copy_ctr=0;
231 uint SharedRuntime::_jint_array_copy_ctr=0;
232 uint SharedRuntime::_jlong_array_copy_ctr=0;
233 uint SharedRuntime::_oop_array_copy_ctr=0;
234 uint SharedRuntime::_checkcast_array_copy_ctr=0;
235 uint SharedRuntime::_unsafe_array_copy_ctr=0;
236 uint SharedRuntime::_generic_array_copy_ctr=0;
237 uint SharedRuntime::_slow_array_copy_ctr=0;
238 uint SharedRuntime::_find_handler_ctr=0;
239 uint SharedRuntime::_rethrow_ctr=0;
240 uint SharedRuntime::_unsafe_set_memory_ctr=0;
241
242 int SharedRuntime::_ICmiss_index = 0;
243 int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
244 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
245
246
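// Record an inline-cache miss at address 'at'. A linear scan updates the count for a
// previously seen address; once the fixed-size table is full, each new address
// overwrites the last slot.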
247 void SharedRuntime::trace_ic_miss(address at) {
248 for (int i = 0; i < _ICmiss_index; i++) {
249 if (_ICmiss_at[i] == at) {
250 _ICmiss_count[i]++;
251 return;
252 }
253 }
254 int index = _ICmiss_index++;
255 if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
256 _ICmiss_at[index] = at;
257 _ICmiss_count[index] = 1;
258 }
259
260 void SharedRuntime::print_ic_miss_histogram() {
261 if (ICMissHistogram) {
262 tty->print_cr("IC Miss Histogram:");
263 int tot_misses = 0;
264 for (int i = 0; i < _ICmiss_index; i++) {
265 tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
266 tot_misses += _ICmiss_count[i];
267 }
268 tty->print_cr("Total IC misses: %7d", tot_misses);
269 }
270 }
271
272 #ifdef COMPILER2
273 // Runtime methods for printf-style debug nodes (same printing format as fieldDescriptor::print_on_for)
274 void SharedRuntime::debug_print_value(jboolean x) {
275 tty->print_cr("boolean %d", x);
276 }
277
278 void SharedRuntime::debug_print_value(jbyte x) {
279 tty->print_cr("byte %d", x);
280 }
281
282 void SharedRuntime::debug_print_value(jshort x) {
283 tty->print_cr("short %d", x);
284 }
285
286 void SharedRuntime::debug_print_value(jchar x) {
287 tty->print_cr("char %c %d", isprint(x) ? x : ' ', x);
288 }
289
290 void SharedRuntime::debug_print_value(jint x) {
291 tty->print_cr("int %d", x);
292 }
293
294 void SharedRuntime::debug_print_value(jlong x) {
295 tty->print_cr("long " JLONG_FORMAT, x);
296 }
297
298 void SharedRuntime::debug_print_value(jfloat x) {
299 tty->print_cr("float %f", x);
300 }
301
302 void SharedRuntime::debug_print_value(jdouble x) {
303 tty->print_cr("double %lf", x);
304 }
305
306 void SharedRuntime::debug_print_value(oopDesc* x) {
307 x->print();
308 }
309 #endif // COMPILER2
310
311 #endif // PRODUCT
312
313
314 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
315 return x * y;
316 JRT_END
317
318
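// The checks in ldiv/lrem below guard the single overflowing case, min_jlong / -1:
// the JLS requires the quotient to be min_jlong and the remainder to be 0, whereas a
// plain hardware divide would trap on this case on some platforms.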
319 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
320 if (x == min_jlong && y == CONST64(-1)) {
321 return x;
322 } else {
323 return x / y;
324 }
325 JRT_END
326
327
328 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
329 if (x == min_jlong && y == CONST64(-1)) {
330 return 0;
331 } else {
332 return x % y;
333 }
334 JRT_END
335
336
337 #ifdef _WIN64
338 const juint float_sign_mask = 0x7FFFFFFF;
339 const juint float_infinity = 0x7F800000;
340 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
341 const julong double_infinity = CONST64(0x7FF0000000000000);
342 #endif
343
344 #if !defined(X86)
345 JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
346 #ifdef _WIN64
347 // 64-bit Windows on amd64 returns the wrong values for
348 // infinity operands.
349 juint xbits = PrimitiveConversions::cast<juint>(x);
350 juint ybits = PrimitiveConversions::cast<juint>(y);
351 // x Mod Infinity == x unless x is infinity
352 if (((xbits & float_sign_mask) != float_infinity) &&
353 ((ybits & float_sign_mask) == float_infinity) ) {
354 return x;
355 }
356 return ((jfloat)fmod_winx64((double)x, (double)y));
357 #else
358 return ((jfloat)fmod((double)x,(double)y));
359 #endif
360 JRT_END
361
362 JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
363 #ifdef _WIN64
364 julong xbits = PrimitiveConversions::cast<julong>(x);
365 julong ybits = PrimitiveConversions::cast<julong>(y);
366 // x Mod Infinity == x unless x is infinity
367 if (((xbits & double_sign_mask) != double_infinity) &&
368 ((ybits & double_sign_mask) == double_infinity) ) {
369 return x;
370 }
371 return ((jdouble)fmod_winx64((double)x, (double)y));
372 #else
373 return ((jdouble)fmod((double)x,(double)y));
374 #endif
375 JRT_END
376 #endif // !X86
377
378 JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
379 return (jfloat)x;
380 JRT_END
381
382 #ifdef __SOFTFP__
383 JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
384 return x + y;
385 JRT_END
386
387 JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
388 return x - y;
389 JRT_END
390
391 JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
392 return x * y;
393 JRT_END
394
395 JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
396 return x / y;
397 JRT_END
398
399 JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
400 return x + y;
401 JRT_END
402
403 JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
404 return x - y;
405 JRT_END
406
407 JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
408 return x * y;
409 JRT_END
410
411 JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
412 return x / y;
413 JRT_END
414
415 JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
416 return (jdouble)x;
417 JRT_END
418
419 JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
420 return (jdouble)x;
421 JRT_END
422
423 JRT_LEAF(int, SharedRuntime::fcmpl(float x, float y))
424 return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan*/
425 JRT_END
426
427 JRT_LEAF(int, SharedRuntime::fcmpg(float x, float y))
428 return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
429 JRT_END
430
431 JRT_LEAF(int, SharedRuntime::dcmpl(double x, double y))
432 return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
433 JRT_END
434
435 JRT_LEAF(int, SharedRuntime::dcmpg(double x, double y))
436 return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
437 JRT_END
438
439 // Functions to return the opposite of the aeabi functions for nan.
440 JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
441 return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
442 JRT_END
443
444 JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
445 return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
446 JRT_END
447
448 JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
449 return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
450 JRT_END
451
452 JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
453 return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
454 JRT_END
455
456 JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
457 return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
458 JRT_END
459
460 JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
461 return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
462 JRT_END
463
464 JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
465 return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
466 JRT_END
467
468 JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
469 return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
470 JRT_END
471
472 // Intrinsics make gcc generate code for these.
473 float SharedRuntime::fneg(float f) {
474 return -f;
475 }
476
477 double SharedRuntime::dneg(double f) {
478 return -f;
479 }
480
481 #endif // __SOFTFP__
482
483 #if defined(__SOFTFP__) || defined(E500V2)
484 // Intrinsics make gcc generate code for these.
485 double SharedRuntime::dabs(double f) {
486 return (f <= (double)0.0) ? (double)0.0 - f : f;
487 }
488
489 #endif
490
491 #if defined(__SOFTFP__)
492 double SharedRuntime::dsqrt(double f) {
493 return sqrt(f);
494 }
495 #endif
496
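// Java floating-point to integral conversions (JLS 5.1.3): NaN converts to 0 and
// out-of-range values saturate to the target type's min/max value. The routines
// below implement these semantics explicitly.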
497 JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
498 if (g_isnan(x))
499 return 0;
500 if (x >= (jfloat) max_jint)
501 return max_jint;
502 if (x <= (jfloat) min_jint)
503 return min_jint;
504 return (jint) x;
505 JRT_END
506
507
508 JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
509 if (g_isnan(x))
510 return 0;
511 if (x >= (jfloat) max_jlong)
512 return max_jlong;
513 if (x <= (jfloat) min_jlong)
514 return min_jlong;
515 return (jlong) x;
516 JRT_END
517
518
519 JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
520 if (g_isnan(x))
521 return 0;
522 if (x >= (jdouble) max_jint)
523 return max_jint;
524 if (x <= (jdouble) min_jint)
525 return min_jint;
526 return (jint) x;
527 JRT_END
528
529
530 JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
531 if (g_isnan(x))
532 return 0;
533 if (x >= (jdouble) max_jlong)
534 return max_jlong;
535 if (x <= (jdouble) min_jlong)
536 return min_jlong;
537 return (jlong) x;
538 JRT_END
539
540
541 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
542 return (jfloat)x;
543 JRT_END
544
545
546 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
547 return (jfloat)x;
548 JRT_END
549
550
551 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
552 return (jdouble)x;
553 JRT_END
554
555
556 // Exception handling across interpreter/compiler boundaries
557 //
558 // exception_handler_for_return_address(...) returns the continuation address.
559 // The continuation address is the entry point of the exception handler of the
560 // previous frame depending on the return address.
561
562 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
563 // Note: This is called when we have unwound the frame of the callee that did
564 // throw an exception. So far, no check has been performed by the StackWatermarkSet.
565 // Notably, the stack is not walkable at this point, and hence the check must
566   // be deferred until later. Specifically, any of the handlers returned by
567   // this function will get dispatched to, and will call the deferred checks of
568   // StackWatermarkSet::after_unwind at a point where the stack is walkable.
569 assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
570 assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
571
572 #if INCLUDE_JVMCI
573 // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
574 // and other exception handler continuations do not read it
575 current->set_exception_pc(nullptr);
576 #endif // INCLUDE_JVMCI
577
578 if (Continuation::is_return_barrier_entry(return_address)) {
579 return StubRoutines::cont_returnBarrierExc();
580 }
581
582 // The fastest case first
583 CodeBlob* blob = CodeCache::find_blob(return_address);
584 nmethod* nm = (blob != nullptr) ? blob->as_nmethod_or_null() : nullptr;
585 if (nm != nullptr) {
586 // native nmethods don't have exception handlers
587 assert(!nm->is_native_method() || nm->method()->is_continuation_enter_intrinsic(), "no exception handler");
588 assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
589 if (nm->is_deopt_pc(return_address)) {
590 // If we come here because of a stack overflow, the stack may be
591 // unguarded. Reguard the stack otherwise if we return to the
592 // deopt blob and the stack bang causes a stack overflow we
593 // crash.
594 StackOverflow* overflow_state = current->stack_overflow_state();
595 bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
596 if (overflow_state->reserved_stack_activation() != current->stack_base()) {
597 overflow_state->set_reserved_stack_activation(current->stack_base());
598 }
599 assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
600 // The deferred StackWatermarkSet::after_unwind check will be performed in
601 // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
602 return SharedRuntime::deopt_blob()->unpack_with_exception();
603 } else {
604 // The deferred StackWatermarkSet::after_unwind check will be performed in
605 // * OptoRuntime::handle_exception_C_helper for C2 code
606 // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
607 #ifdef COMPILER2
608 if (nm->compiler_type() == compiler_c2) {
609 return OptoRuntime::exception_blob()->entry_point();
610 }
611 #endif // COMPILER2
612 return nm->exception_begin();
613 }
614 }
615
616 // Entry code
617 if (StubRoutines::returns_to_call_stub(return_address)) {
618 // The deferred StackWatermarkSet::after_unwind check will be performed in
619 // JavaCallWrapper::~JavaCallWrapper
620 assert (StubRoutines::catch_exception_entry() != nullptr, "must be generated before");
621 return StubRoutines::catch_exception_entry();
622 }
623 if (blob != nullptr && blob->is_upcall_stub()) {
624 return StubRoutines::upcall_stub_exception_handler();
625 }
626 // Interpreted code
627 if (Interpreter::contains(return_address)) {
628 // The deferred StackWatermarkSet::after_unwind check will be performed in
629 // InterpreterRuntime::exception_handler_for_exception
630 return Interpreter::rethrow_exception_entry();
631 }
632
633 guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
634 guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");
635
636 #ifndef PRODUCT
637 { ResourceMark rm;
638 tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
639 os::print_location(tty, (intptr_t)return_address);
640     tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
641 tty->print_cr("b) other problem");
642 }
643 #endif // PRODUCT
644 ShouldNotReachHere();
645 return nullptr;
646 }
647
648
649 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* current, address return_address))
650 return raw_exception_handler_for_return_address(current, return_address);
651 JRT_END
652
653
654 address SharedRuntime::get_poll_stub(address pc) {
655 address stub;
656 // Look up the code blob
657 CodeBlob *cb = CodeCache::find_blob(pc);
658
659 // Should be an nmethod
660 guarantee(cb != nullptr && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");
661
662 // Look up the relocation information
663 assert(cb->as_nmethod()->is_at_poll_or_poll_return(pc),
664 "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));
665
666 #ifdef ASSERT
667 if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
668 tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
669 Disassembler::decode(cb);
670 fatal("Only polling locations are used for safepoint");
671 }
672 #endif
673
674 bool at_poll_return = cb->as_nmethod()->is_at_poll_return(pc);
675 bool has_wide_vectors = cb->as_nmethod()->has_wide_vectors();
676 if (at_poll_return) {
677 assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
678 "polling page return stub not created yet");
679 stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
680 } else if (has_wide_vectors) {
681 assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr,
682 "polling page vectors safepoint stub not created yet");
683 stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
684 } else {
685 assert(SharedRuntime::polling_page_safepoint_handler_blob() != nullptr,
686 "polling page safepoint stub not created yet");
687 stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
688 }
689 log_trace(safepoint)("Polling page exception: thread = " INTPTR_FORMAT " [%d], pc = "
690 INTPTR_FORMAT " (%s), stub = " INTPTR_FORMAT,
691 p2i(Thread::current()),
692 Thread::current()->osthread()->thread_id(),
693 p2i(pc),
694 at_poll_return ? "return" : "loop",
695 p2i(stub));
696 return stub;
697 }
698
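// Throw h_exception in the current thread, first posting a JVMTI exception event if
// an agent has requested them and, under JVMCI, recording exception_seen profile
// data when the throwing bytecode is an invoke.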
699 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception) {
700 if (JvmtiExport::can_post_on_exceptions()) {
701 vframeStream vfst(current, true);
702 methodHandle method = methodHandle(current, vfst.method());
703 address bcp = method()->bcp_from(vfst.bci());
704 JvmtiExport::post_exception_throw(current, method(), bcp, h_exception());
705 }
706
707 #if INCLUDE_JVMCI
708 if (EnableJVMCI) {
709 vframeStream vfst(current, true);
710 methodHandle method = methodHandle(current, vfst.method());
711 int bci = vfst.bci();
712 MethodData* trap_mdo = method->method_data();
713 if (trap_mdo != nullptr) {
714 // Set exception_seen if the exceptional bytecode is an invoke
715 Bytecode_invoke call = Bytecode_invoke_check(method, bci);
716 if (call.is_valid()) {
717 ResourceMark rm(current);
718
719 // Lock to read ProfileData, and ensure lock is not broken by a safepoint
720 MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
721
722 ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr);
723 if (pdata != nullptr && pdata->is_BitData()) {
724 BitData* bit_data = (BitData*) pdata;
725 bit_data->set_exception_seen();
726 }
727 }
728 }
729 }
730 #endif
731
732 Exceptions::_throw(current, __FILE__, __LINE__, h_exception);
733 }
734
735 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message) {
736 Handle h_exception = Exceptions::new_exception(current, name, message);
737 throw_and_post_jvmti_exception(current, h_exception);
738 }
739
740 // The interpreter code that calls this tracing function is only
741 // generated when Unified Logging is enabled for the redefine and class tags
742 // at the right level. Since obsolete methods are never compiled, we don't have
743 // to modify the compilers to generate calls to this function.
744 //
745 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
746 JavaThread* thread, Method* method))
747 if (method->is_obsolete()) {
748 // We are calling an obsolete method, but this is not necessarily
749 // an error. Our method could have been redefined just after we
750 // fetched the Method* from the constant pool.
751 ResourceMark rm;
752 log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
753 }
754 return 0;
755 JRT_END
756
757 // ret_pc points into the caller; we are returning the caller's exception handler
758 // for the given exception.
759 // Note that the implementation of this method assumes it is only called when an exception has actually occurred
760 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
761 bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
762 assert(nm != nullptr, "must exist");
763 ResourceMark rm;
764
765 #if INCLUDE_JVMCI
766 if (nm->is_compiled_by_jvmci()) {
767 // lookup exception handler for this pc
768 int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
769 ExceptionHandlerTable table(nm);
770 HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
771 if (t != nullptr) {
772 return nm->code_begin() + t->pco();
773 } else {
774 bool make_not_entrant = true;
775 return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
776 }
777 }
778 #endif // INCLUDE_JVMCI
779
780 ScopeDesc* sd = nm->scope_desc_at(ret_pc);
781 // determine handler bci, if any
782 EXCEPTION_MARK;
783
784 Handle orig_exception(THREAD, exception());
785
786 int handler_bci = -1;
787 int scope_depth = 0;
788 if (!force_unwind) {
789 int bci = sd->bci();
790 bool recursive_exception = false;
791 do {
792 bool skip_scope_increment = false;
793 // exception handler lookup
794 Klass* ek = exception->klass();
795 methodHandle mh(THREAD, sd->method());
796 handler_bci = Method::fast_exception_handler_bci_for(mh, ek, bci, THREAD);
797 if (HAS_PENDING_EXCEPTION) {
798 recursive_exception = true;
799 // We threw an exception while trying to find the exception handler.
800 // Transfer the new exception to the exception handle which will
801 // be set into thread local storage, and do another lookup for an
802 // exception handler for this exception, this time starting at the
803 // BCI of the exception handler which caused the exception to be
804 // thrown (bugs 4307310 and 4546590). Set "exception" reference
805 // argument to ensure that the correct exception is thrown (4870175).
806 recursive_exception_occurred = true;
807 exception.replace(PENDING_EXCEPTION);
808 CLEAR_PENDING_EXCEPTION;
809 if (handler_bci >= 0) {
810 bci = handler_bci;
811 handler_bci = -1;
812 skip_scope_increment = true;
813 }
814 }
815 else {
816 recursive_exception = false;
817 }
818 if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
819 sd = sd->sender();
820 if (sd != nullptr) {
821 bci = sd->bci();
822 }
823 ++scope_depth;
824 }
825 } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != nullptr));
826 }
827
828 // found handling method => lookup exception handler
829 int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
830
831 ExceptionHandlerTable table(nm);
832 HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
833
834 // If the compiler did not anticipate a recursive exception, resulting in an exception
835 // thrown from the catch bci, then the compiled exception handler might be missing.
836 // This is rare. Just deoptimize and let the interpreter rethrow the original
837 // exception at the original bci.
838 if (t == nullptr && recursive_exception_occurred) {
839 exception.replace(orig_exception()); // restore original exception
840 bool make_not_entrant = false;
841 return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
842 }
843
844 if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) {
845 // Allow abbreviated catch tables. The idea is to allow a method
846 // to materialize its exceptions without committing to the exact
847 // routing of exceptions. In particular this is needed for adding
848 // a synthetic handler to unlock monitors when inlining
849 // synchronized methods since the unlock path isn't represented in
850 // the bytecodes.
851 t = table.entry_for(catch_pco, -1, 0);
852 }
853
854 #ifdef COMPILER1
855 if (t == nullptr && nm->is_compiled_by_c1()) {
856 assert(nm->unwind_handler_begin() != nullptr, "");
857 return nm->unwind_handler_begin();
858 }
859 #endif
860
861 if (t == nullptr) {
862 ttyLocker ttyl;
863 tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
864 tty->print_cr(" Exception:");
865 exception->print();
866 tty->cr();
867     tty->print_cr(" Compiled exception table:");
868 table.print();
869 nm->print();
870 nm->print_code();
871 guarantee(false, "missing exception handler");
872 return nullptr;
873 }
874
875 if (handler_bci != -1) { // did we find a handler in this method?
876 sd->method()->set_exception_handler_entered(handler_bci); // profile
877 }
878 return nm->code_begin() + t->pco();
879 }
880
881 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
882 // These errors occur only at call sites
883 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
884 JRT_END
885
886 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
887 // These errors occur only at call sites
888 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
889 JRT_END
890
891 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
892 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
893 JRT_END
894
895 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* current))
896 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
897 JRT_END
898
899 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current))
900 // This entry point is effectively only used for NullPointerExceptions which occur at inline
901 // cache sites (when the callee activation is not yet set up) so we are at a call site
902 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
903 JRT_END
904
905 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* current))
906 throw_StackOverflowError_common(current, false);
907 JRT_END
908
909 JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* current))
910 throw_StackOverflowError_common(current, true);
911 JRT_END
912
913 void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool delayed) {
914 // We avoid using the normal exception construction in this case because
915 // it performs an upcall to Java, and we're already out of stack space.
916 JavaThread* THREAD = current; // For exception macros.
917 InstanceKlass* k = vmClasses::StackOverflowError_klass();
918 oop exception_oop = k->allocate_instance(CHECK);
919 if (delayed) {
920 java_lang_Throwable::set_message(exception_oop,
921 Universe::delayed_stack_overflow_error_message());
922 }
923 Handle exception (current, exception_oop);
924 if (StackTraceInThrowable) {
925 java_lang_Throwable::fill_in_stack_trace(exception);
926 }
927 // Remove the ScopedValue bindings in case we got a
928 // StackOverflowError while we were trying to remove ScopedValue
929 // bindings.
930 current->clear_scopedValueBindings();
931 // Increment counter for hs_err file reporting
932 AtomicAccess::inc(&Exceptions::_stack_overflow_errors);
933 throw_and_post_jvmti_exception(current, exception);
934 }
935
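// Compute the continuation address for an implicit exception (null check, division
// by zero, stack overflow) detected via a hardware trap at 'pc'. Returns the address
// to continue execution at, or null if the fault should be reported as a plain crash
// by the signal handler.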
936 address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
937 address pc,
938 ImplicitExceptionKind exception_kind)
939 {
940 address target_pc = nullptr;
941
942 if (Interpreter::contains(pc)) {
943 switch (exception_kind) {
944 case IMPLICIT_NULL: return Interpreter::throw_NullPointerException_entry();
945 case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
946 case STACK_OVERFLOW: return Interpreter::throw_StackOverflowError_entry();
947 default: ShouldNotReachHere();
948 }
949 } else {
950 switch (exception_kind) {
951 case STACK_OVERFLOW: {
952 // Stack overflow only occurs upon frame setup; the callee is
953 // going to be unwound. Dispatch to a shared runtime stub
954 // which will cause the StackOverflowError to be fabricated
955 // and processed.
956 // Stack overflow should never occur during deoptimization:
957 // the compiled method bangs the stack by as much as the
958 // interpreter would need in case of a deoptimization. The
959 // deoptimization blob and uncommon trap blob bang the stack
960 // in a debug VM to verify the correctness of the compiled
961 // method stack banging.
962 assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap");
963 Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
964 return SharedRuntime::throw_StackOverflowError_entry();
965 }
966
967 case IMPLICIT_NULL: {
968 if (VtableStubs::contains(pc)) {
969 // We haven't yet entered the callee frame. Fabricate an
970 // exception and begin dispatching it in the caller. Since
971 // the caller was at a call site, it's safe to destroy all
972 // caller-saved registers, as these entry points do.
973 VtableStub* vt_stub = VtableStubs::stub_containing(pc);
974
975 // If vt_stub is null, then return null to signal handler to report the SEGV error.
976 if (vt_stub == nullptr) return nullptr;
977
978 if (vt_stub->is_abstract_method_error(pc)) {
979 assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
980 Events::log_exception(current, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc));
981 // Instead of throwing the abstract method error here directly, we re-resolve
982 // and will throw the AbstractMethodError during resolve. As a result, we'll
983 // get a more detailed error message.
984 return SharedRuntime::get_handle_wrong_method_stub();
985 } else {
986 Events::log_exception(current, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc));
987 // Assert that the signal comes from the expected location in stub code.
988 assert(vt_stub->is_null_pointer_exception(pc),
989 "obtained signal from unexpected location in stub code");
990 return SharedRuntime::throw_NullPointerException_at_call_entry();
991 }
992 } else {
993 CodeBlob* cb = CodeCache::find_blob(pc);
994
995 // If code blob is null, then return null to signal handler to report the SEGV error.
996 if (cb == nullptr) return nullptr;
997
998 // Exception happened in CodeCache. Must be either:
999 // 1. Inline-cache check in C2I handler blob,
1000 // 2. Inline-cache check in nmethod, or
1001 // 3. Implicit null exception in nmethod
1002
1003 if (!cb->is_nmethod()) {
1004 bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
1005 if (!is_in_blob) {
1006 // Allow normal crash reporting to handle this
1007 return nullptr;
1008 }
1009 Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
1010 // There is no handler here, so we will simply unwind.
1011 return SharedRuntime::throw_NullPointerException_at_call_entry();
1012 }
1013
1014 // Otherwise, it's a compiled method. Consult its exception handlers.
1015 nmethod* nm = cb->as_nmethod();
1016 if (nm->inlinecache_check_contains(pc)) {
1017 // exception happened inside inline-cache check code
1018 // => the nmethod is not yet active (i.e., the frame
1019 // is not set up yet) => use return address pushed by
1020 // caller => don't push another return address
1021 Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc));
1022 return SharedRuntime::throw_NullPointerException_at_call_entry();
1023 }
1024
1025 if (nm->method()->is_method_handle_intrinsic()) {
1026 // exception happened inside MH dispatch code, similar to a vtable stub
1027 Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
1028 return SharedRuntime::throw_NullPointerException_at_call_entry();
1029 }
1030
1031 #ifndef PRODUCT
1032 _implicit_null_throws++;
1033 #endif
1034 target_pc = nm->continuation_for_implicit_null_exception(pc);
1035 // If there's an unexpected fault, target_pc might be null,
1036 // in which case we want to fall through into the normal
1037 // error handling code.
1038 }
1039
1040 break; // fall through
1041 }
1042
1043
1044 case IMPLICIT_DIVIDE_BY_ZERO: {
1045 nmethod* nm = CodeCache::find_nmethod(pc);
1046 guarantee(nm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions");
1047 #ifndef PRODUCT
1048 _implicit_div0_throws++;
1049 #endif
1050 target_pc = nm->continuation_for_implicit_div0_exception(pc);
1051 // If there's an unexpected fault, target_pc might be null,
1052 // in which case we want to fall through into the normal
1053 // error handling code.
1054 break; // fall through
1055 }
1056
1057 default: ShouldNotReachHere();
1058 }
1059
1060 assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
1061
1062 if (exception_kind == IMPLICIT_NULL) {
1063 #ifndef PRODUCT
1064 // for AbortVMOnException flag
1065 Exceptions::debug_check_abort("java.lang.NullPointerException");
1066 #endif //PRODUCT
1067 Events::log_exception(current, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1068 } else {
1069 #ifndef PRODUCT
1070 // for AbortVMOnException flag
1071 Exceptions::debug_check_abort("java.lang.ArithmeticException");
1072 #endif //PRODUCT
1073 Events::log_exception(current, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1074 }
1075 return target_pc;
1076 }
1077
1078 ShouldNotReachHere();
1079 return nullptr;
1080 }
1081
1082
1083 /**
1084 * Throws a java/lang/UnsatisfiedLinkError. The address of this method is
1085 * installed in the native function entry of all native Java methods before
1086 * they get linked to their actual native methods.
1087 *
1088 * \note
1089 * This method actually never gets called! The reason is that
1090 * the interpreter's native entries call NativeLookup::lookup(), which
1091 * throws the exception when the lookup fails. The exception is then
1092 * caught and forwarded on the return from NativeLookup::lookup() call
1093 * before the call to the native function. This might change in the future.
1094 */
1095 JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
1096 {
1097 // We return a bad value here to make sure that the exception is
1098 // forwarded before we look at the return value.
1099 THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
1100 }
1101 JNI_END
1102
1103 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
1104 return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
1105 }
1106
1107 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
1108 #if INCLUDE_JVMCI
1109 if (!obj->klass()->has_finalizer()) {
1110 return;
1111 }
1112 #endif // INCLUDE_JVMCI
1113 assert(oopDesc::is_oop(obj), "must be a valid oop");
1114 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1115 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1116 JRT_END
1117
1118 jlong SharedRuntime::get_java_tid(JavaThread* thread) {
1119 assert(thread != nullptr, "No thread");
1120 if (thread == nullptr) {
1121 return 0;
1122 }
1123 guarantee(Thread::current() != thread || thread->is_oop_safe(),
1124 "current cannot touch oops after its GC barrier is detached.");
1125 oop obj = thread->threadObj();
1126 return (obj == nullptr) ? 0 : java_lang_Thread::thread_id(obj);
1127 }
1128
1129 /**
1130 * This function ought to be a void function, but cannot be because
1131 * it gets turned into a tail-call on sparc, which runs into dtrace bug
1132 * 6254741. Once that is fixed we can remove the dummy return value.
1133 */
1134 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
1135 return dtrace_object_alloc(JavaThread::current(), o, o->size());
1136 }
1137
1138 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o) {
1139 return dtrace_object_alloc(thread, o, o->size());
1140 }
1141
1142 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size) {
1143 assert(DTraceAllocProbes, "wrong call");
1144 Klass* klass = o->klass();
1145 Symbol* name = klass->name();
1146 HOTSPOT_OBJECT_ALLOC(
1147 get_java_tid(thread),
1148 (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
1149 return 0;
1150 }
1151
1152 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
1153 JavaThread* current, Method* method))
1154 assert(current == JavaThread::current(), "pre-condition");
1155
1156 assert(DTraceMethodProbes, "wrong call");
1157 Symbol* kname = method->klass_name();
1158 Symbol* name = method->name();
1159 Symbol* sig = method->signature();
1160 HOTSPOT_METHOD_ENTRY(
1161 get_java_tid(current),
1162 (char *) kname->bytes(), kname->utf8_length(),
1163 (char *) name->bytes(), name->utf8_length(),
1164 (char *) sig->bytes(), sig->utf8_length());
1165 return 0;
1166 JRT_END
1167
1168 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
1169 JavaThread* current, Method* method))
1170 assert(current == JavaThread::current(), "pre-condition");
1171 assert(DTraceMethodProbes, "wrong call");
1172 Symbol* kname = method->klass_name();
1173 Symbol* name = method->name();
1174 Symbol* sig = method->signature();
1175 HOTSPOT_METHOD_RETURN(
1176 get_java_tid(current),
1177 (char *) kname->bytes(), kname->utf8_length(),
1178 (char *) name->bytes(), name->utf8_length(),
1179 (char *) sig->bytes(), sig->utf8_length());
1180 return 0;
1181 JRT_END
1182
1183
1184 // Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
1185 // for a call currently in progress, i.e., arguments have been pushed on the stack
1186 // but the callee has not been invoked yet. Used by: resolve virtual/static,
1187 // vtable updates, etc. Caller frame must be compiled.
1188 Handle SharedRuntime::find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
1189 JavaThread* current = THREAD;
1190 ResourceMark rm(current);
1191
1192 // last java frame on stack (which includes native call frames)
1193 vframeStream vfst(current, true); // Do not skip any javaCalls
1194
1195 return find_callee_info_helper(vfst, bc, callinfo, THREAD);
1196 }
1197
1198 Method* SharedRuntime::extract_attached_method(vframeStream& vfst) {
1199 nmethod* caller = vfst.nm();
1200
1201 address pc = vfst.frame_pc();
1202 { // Get call instruction under lock because another thread may be busy patching it.
1203 CompiledICLocker ic_locker(caller);
1204 return caller->attached_method_before_pc(pc);
1205 }
1206 return nullptr;
1207 }
1208
1209 // Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
1210 // for a call currently in progress, i.e., arguments have been pushed on the stack
1211 // but the callee has not been invoked yet. Caller frame must be compiled.
1212 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1213 CallInfo& callinfo, TRAPS) {
1214 Handle receiver;
1215 Handle nullHandle; // create a handy null handle for exception returns
1216 JavaThread* current = THREAD;
1217
1218 assert(!vfst.at_end(), "Java frame must exist");
1219
1220 // Find caller and bci from vframe
1221 methodHandle caller(current, vfst.method());
1222 int bci = vfst.bci();
1223
1224 if (caller->is_continuation_enter_intrinsic()) {
1225 bc = Bytecodes::_invokestatic;
1226 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1227 return receiver;
1228 }
1229
1230 Bytecode_invoke bytecode(caller, bci);
1231 int bytecode_index = bytecode.index();
1232 bc = bytecode.invoke_code();
1233
1234 methodHandle attached_method(current, extract_attached_method(vfst));
1235 if (attached_method.not_null()) {
1236 Method* callee = bytecode.static_target(CHECK_NH);
1237 vmIntrinsics::ID id = callee->intrinsic_id();
1238 // When the VM replaces a MH.invokeBasic/linkTo* call with a direct/virtual call,
1239 // it attaches the statically resolved method to the call site.
1240 if (MethodHandles::is_signature_polymorphic(id) &&
1241 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1242 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1243
1244 // Adjust invocation mode according to the attached method.
1245 switch (bc) {
1246 case Bytecodes::_invokevirtual:
1247 if (attached_method->method_holder()->is_interface()) {
1248 bc = Bytecodes::_invokeinterface;
1249 }
1250 break;
1251 case Bytecodes::_invokeinterface:
1252 if (!attached_method->method_holder()->is_interface()) {
1253 bc = Bytecodes::_invokevirtual;
1254 }
1255 break;
1256 case Bytecodes::_invokehandle:
1257 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1258 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1259 : Bytecodes::_invokevirtual;
1260 }
1261 break;
1262 default:
1263 break;
1264 }
1265 }
1266 }
1267
1268 assert(bc != Bytecodes::_illegal, "not initialized");
1269
1270 bool has_receiver = bc != Bytecodes::_invokestatic &&
1271 bc != Bytecodes::_invokedynamic &&
1272 bc != Bytecodes::_invokehandle;
1273
1274 // Find receiver for non-static call
1275 if (has_receiver) {
1276 // This register map must be updated since we need to find the receiver for
1277 // compiled frames. The receiver might be in a register.
1278 RegisterMap reg_map2(current,
1279 RegisterMap::UpdateMap::include,
1280 RegisterMap::ProcessFrames::include,
1281 RegisterMap::WalkContinuation::skip);
1282 frame stubFrame = current->last_frame();
1283 // Caller-frame is a compiled frame
1284 frame callerFrame = stubFrame.sender(&reg_map2);
1285
1286 if (attached_method.is_null()) {
1287 Method* callee = bytecode.static_target(CHECK_NH);
1288 if (callee == nullptr) {
1289 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1290 }
1291 }
1292
1293 // Retrieve from a compiled argument list
1294 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1295 assert(oopDesc::is_oop_or_null(receiver()), "");
1296
1297 if (receiver.is_null()) {
1298 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1299 }
1300 }
1301
1302 // Resolve method
1303 if (attached_method.not_null()) {
1304 // Parameterized by attached method.
1305 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1306 } else {
1307 // Parameterized by bytecode.
1308 constantPoolHandle constants(current, caller->constants());
1309 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1310 }
1311
1312 #ifdef ASSERT
1313 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1314 if (has_receiver) {
1315 assert(receiver.not_null(), "should have thrown exception");
1316 Klass* receiver_klass = receiver->klass();
1317 Klass* rk = nullptr;
1318 if (attached_method.not_null()) {
1319 // In case there's a resolved method attached, use its holder during the check.
1320 rk = attached_method->method_holder();
1321 } else {
1322 // Klass is already loaded.
1323 constantPoolHandle constants(current, caller->constants());
1324 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1325 }
1326 Klass* static_receiver_klass = rk;
1327 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1328 "actual receiver must be subclass of static receiver klass");
1329 if (receiver_klass->is_instance_klass()) {
1330 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1331 tty->print_cr("ERROR: Klass not yet initialized!!");
1332 receiver_klass->print();
1333 }
1334 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1335 }
1336 }
1337 #endif
1338
1339 return receiver;
1340 }
1341
1342 methodHandle SharedRuntime::find_callee_method(TRAPS) {
1343 JavaThread* current = THREAD;
1344 ResourceMark rm(current);
1345 // We first need to check whether any Java activations (compiled, interpreted)
1346 // exist on the stack since the last JavaCall. If not, we need
1347 // to get the target method from the JavaCall wrapper.
1348 vframeStream vfst(current, true); // Do not skip any javaCalls
1349 methodHandle callee_method;
1350 if (vfst.at_end()) {
1351 // No Java frames were found on stack since we did the JavaCall.
1352 // Hence the stack can only contain an entry_frame. We need to
1353 // find the target method from the stub frame.
1354 RegisterMap reg_map(current,
1355 RegisterMap::UpdateMap::skip,
1356 RegisterMap::ProcessFrames::include,
1357 RegisterMap::WalkContinuation::skip);
1358 frame fr = current->last_frame();
1359 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1360 fr = fr.sender(&reg_map);
1361 assert(fr.is_entry_frame(), "must be");
1362 // fr is now pointing to the entry frame.
1363 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1364 } else {
1365 Bytecodes::Code bc;
1366 CallInfo callinfo;
1367 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1368 callee_method = methodHandle(current, callinfo.selected_method());
1369 }
1370 assert(callee_method()->is_method(), "must be");
1371 return callee_method;
1372 }
1373
1374 // Resolves a call.
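// Determines the callee for the compiled caller's call site and patches the site
// under CompiledICLocker: a virtual, non-optimized call updates its inline cache,
// while other calls are bound directly to the resolved method. A static call to a
// class that is not yet fully initialized may be left unpatched (see below) so that
// re-resolution keeps enforcing the class initialization check.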
1375 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1376 JavaThread* current = THREAD;
1377 ResourceMark rm(current);
1378 RegisterMap cbl_map(current,
1379 RegisterMap::UpdateMap::skip,
1380 RegisterMap::ProcessFrames::include,
1381 RegisterMap::WalkContinuation::skip);
1382 frame caller_frame = current->last_frame().sender(&cbl_map);
1383
1384 CodeBlob* caller_cb = caller_frame.cb();
1385 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1386 nmethod* caller_nm = caller_cb->as_nmethod();
1387
1388 // determine call info & receiver
1389 // note: a) receiver is null for static calls
1390 // b) an exception is thrown if receiver is null for non-static calls
1391 CallInfo call_info;
1392 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1393 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1394
1395 NoSafepointVerifier nsv;
1396
1397 methodHandle callee_method(current, call_info.selected_method());
1398
1399 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1400 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1401 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1402 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1403 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1404
1405 assert(!caller_nm->is_unloading(), "It should not be unloading");
1406
1407 #ifndef PRODUCT
1408 // tracing/debugging/statistics
1409 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1410 (is_virtual) ? (&_resolve_virtual_ctr) :
1411 (&_resolve_static_ctr);
1412 AtomicAccess::inc(addr);
1413
1414 if (TraceCallFixup) {
1415 ResourceMark rm(current);
1416 tty->print("resolving %s%s (%s) call to",
1417 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1418 Bytecodes::name(invoke_code));
1419 callee_method->print_short_name(tty);
1420 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1421 p2i(caller_frame.pc()), p2i(callee_method->code()));
1422 }
1423 #endif
1424
1425 if (invoke_code == Bytecodes::_invokestatic) {
1426 assert(callee_method->method_holder()->is_initialized() ||
1427 callee_method->method_holder()->is_reentrant_initialization(current),
1428 "invalid class initialization state for invoke_static");
1429 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1430 // In order to keep the class initialization check, do not patch the call
1431 // site for a static call when the class is not fully initialized.
1432 // The proper check is enforced by call site re-resolution on every invocation.
1433 //
1434 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1435 // explicit class initialization check is put in nmethod entry (VEP).
1436 assert(callee_method->method_holder()->is_linked(), "must be");
1437 return callee_method;
1438 }
1439 }
1440
1441
1442 // JSR 292 key invariant:
1443 // If the resolved method is a MethodHandle invoke target, the call
1444 // site must be a MethodHandle call site, because the lambda form might tail-call
1445 // leaving the stack in a state unknown to either caller or callee
1446
1447 // Compute entry points. The computation of the entry points is independent of
1448 // patching the call.
1449
1450 // Make sure the callee nmethod does not get deoptimized and removed before
1451 // we are done patching the code.
1452
1453
1454 CompiledICLocker ml(caller_nm);
1455 if (is_virtual && !is_optimized) {
1456 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1457 inline_cache->update(&call_info, receiver->klass());
1458 } else {
1459 // Callsite is a direct call - set it to the destination method
1460 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1461 callsite->set(callee_method);
1462 }
1463
1464 return callee_method;
1465 }
1466
1467 // Inline caches exist only in compiled code
1468 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1469 #ifdef ASSERT
1470 RegisterMap reg_map(current,
1471 RegisterMap::UpdateMap::skip,
1472 RegisterMap::ProcessFrames::include,
1473 RegisterMap::WalkContinuation::skip);
1474 frame stub_frame = current->last_frame();
1475 assert(stub_frame.is_runtime_frame(), "sanity check");
1476   frame caller_frame = stub_frame.sender(&reg_map);
1477 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1478 #endif /* ASSERT */
1479
1480 methodHandle callee_method;
1481 JRT_BLOCK
1482 callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1483 // Return Method* through TLS
1484 current->set_vm_result_metadata(callee_method());
1485 JRT_BLOCK_END
1486 // return compiled code entry point after potential safepoints
1487 return get_resolved_entry(current, callee_method);
1488 JRT_END
1489
1490
1491 // Handle call site that has been made non-entrant
1492 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1493 // 6243940 We might end up in here if the callee is deoptimized
1494 // as we race to call it. We don't want to take a safepoint if
1495 // the caller was interpreted because the caller frame will look
1496 // interpreted to the stack walkers and arguments are now
1497 // "compiled" so it is much better to make this transition
1498 // invisible to the stack walking code. The i2c path will
1499 // place the callee method in the callee_target. It is stashed
1500 // there because if we tried to find the callee by normal means a
1501 // safepoint would be possible and we would have trouble gc'ing the compiled args.
1502 RegisterMap reg_map(current,
1503 RegisterMap::UpdateMap::skip,
1504 RegisterMap::ProcessFrames::include,
1505 RegisterMap::WalkContinuation::skip);
1506 frame stub_frame = current->last_frame();
1507 assert(stub_frame.is_runtime_frame(), "sanity check");
1508   frame caller_frame = stub_frame.sender(&reg_map);
1509
1510 if (caller_frame.is_interpreted_frame() ||
1511 caller_frame.is_entry_frame() ||
1512 caller_frame.is_upcall_stub_frame()) {
1513 Method* callee = current->callee_target();
1514 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1515 current->set_vm_result_metadata(callee);
1516 current->set_callee_target(nullptr);
1517 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1518 // Bypass class initialization checks in c2i when caller is in native.
1519 // JNI calls to static methods don't have class initialization checks.
1520 // Fast class initialization checks are present in c2i adapters and call into
1521 // SharedRuntime::handle_wrong_method() on the slow path.
1522 //
1523 // JVM upcalls may land here as well, but there's a proper check present in
1524 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1525 // so bypassing it in c2i adapter is benign.
1526 return callee->get_c2i_no_clinit_check_entry();
1527 } else {
1528 return callee->get_c2i_entry();
1529 }
1530 }
1531
1532 // Must be compiled to compiled path which is safe to stackwalk
1533 methodHandle callee_method;
1534 JRT_BLOCK
1535 // Force resolving of caller (if we called from compiled frame)
1536 callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1537 current->set_vm_result_metadata(callee_method());
1538 JRT_BLOCK_END
1539 // return compiled code entry point after potential safepoints
1540 return get_resolved_entry(current, callee_method);
1541 JRT_END
1542
1543 // Handle abstract method call
1544 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1545 // Verbose error message for AbstractMethodError.
1546 // Get the called method from the invoke bytecode.
1547 vframeStream vfst(current, true);
1548 assert(!vfst.at_end(), "Java frame must exist");
1549 methodHandle caller(current, vfst.method());
1550 Bytecode_invoke invoke(caller, vfst.bci());
1551 DEBUG_ONLY( invoke.verify(); )
1552
1553 // Find the compiled caller frame.
1554 RegisterMap reg_map(current,
1555 RegisterMap::UpdateMap::include,
1556 RegisterMap::ProcessFrames::include,
1557 RegisterMap::WalkContinuation::skip);
1558 frame stubFrame = current->last_frame();
1559 assert(stubFrame.is_runtime_frame(), "must be");
1560   frame callerFrame = stubFrame.sender(&reg_map);
1561 assert(callerFrame.is_compiled_frame(), "must be");
1562
1563 // Install exception and return forward entry.
1564 address res = SharedRuntime::throw_AbstractMethodError_entry();
1565 JRT_BLOCK
1566 methodHandle callee(current, invoke.static_target(current));
1567 if (!callee.is_null()) {
1568       oop recv = callerFrame.retrieve_receiver(&reg_map);
1569 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1570 res = StubRoutines::forward_exception_entry();
1571 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1572 }
1573 JRT_BLOCK_END
1574 return res;
1575 JRT_END
1576
1577 // return verified_code_entry if interp_only_mode is not set for the current thread;
1578 // otherwise return c2i entry.
1579 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
1580 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1581 // In interp_only_mode we need to go to the interpreted entry
1582 // The c2i won't patch in this mode -- see fixup_callers_callsite
1583 return callee_method->get_c2i_entry();
1584 }
1585 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1586 return callee_method->verified_code_entry();
1587 }
1588
1589 // resolve a static call and patch code
1590 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1591 methodHandle callee_method;
1592 bool enter_special = false;
1593 JRT_BLOCK
1594 callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1595 current->set_vm_result_metadata(callee_method());
1596 JRT_BLOCK_END
1597 // return compiled code entry point after potential safepoints
1598 return get_resolved_entry(current, callee_method);
1599 JRT_END
1600
1601 // resolve virtual call and update inline cache to monomorphic
1602 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1603 methodHandle callee_method;
1604 JRT_BLOCK
1605 callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1606 current->set_vm_result_metadata(callee_method());
1607 JRT_BLOCK_END
1608 // return compiled code entry point after potential safepoints
1609 return get_resolved_entry(current, callee_method);
1610 JRT_END
1611
1612
1613 // Resolve a virtual call that can be statically bound (e.g., always
1614 // monomorphic, so it has no inline cache). Patch code to resolved target.
1615 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1616 methodHandle callee_method;
1617 JRT_BLOCK
1618 callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1619 current->set_vm_result_metadata(callee_method());
1620 JRT_BLOCK_END
1621 // return compiled code entry point after potential safepoints
1622 return get_resolved_entry(current, callee_method);
1623 JRT_END
1624
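// Slow path for an inline cache miss in compiled code: re-resolve the callee
// from the caller's bytecode and update the caller's inline cache for the
// observed receiver klass under the CompiledICLocker.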
1625 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1626 JavaThread* current = THREAD;
1627 ResourceMark rm(current);
1628 CallInfo call_info;
1629 Bytecodes::Code bc;
1630
1631 // receiver is null for static calls. An exception is thrown for null
1632 // receivers for non-static calls
1633 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1634
1635 methodHandle callee_method(current, call_info.selected_method());
1636
1637 #ifndef PRODUCT
1638 AtomicAccess::inc(&_ic_miss_ctr);
1639
1640 // Statistics & Tracing
1641 if (TraceCallFixup) {
1642 ResourceMark rm(current);
1643 tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1644 callee_method->print_short_name(tty);
1645 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1646 }
1647
1648 if (ICMissHistogram) {
1649 MutexLocker m(VMStatistic_lock);
1650 RegisterMap reg_map(current,
1651 RegisterMap::UpdateMap::skip,
1652 RegisterMap::ProcessFrames::include,
1653 RegisterMap::WalkContinuation::skip);
1654     frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1655 // produce statistics under the lock
1656 trace_ic_miss(f.pc());
1657 }
1658 #endif
1659
1660 // install an event collector so that when a vtable stub is created the
1661 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1662 // event can't be posted when the stub is created as locks are held
1663 // - instead the event will be deferred until the event collector goes
1664 // out of scope.
1665 JvmtiDynamicCodeEventCollector event_collector;
1666
1667 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1668 RegisterMap reg_map(current,
1669 RegisterMap::UpdateMap::skip,
1670 RegisterMap::ProcessFrames::include,
1671 RegisterMap::WalkContinuation::skip);
1672   frame caller_frame = current->last_frame().sender(&reg_map);
1673 CodeBlob* cb = caller_frame.cb();
1674 nmethod* caller_nm = cb->as_nmethod();
1675
1676 CompiledICLocker ml(caller_nm);
1677 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1678 inline_cache->update(&call_info, receiver()->klass());
1679
1680 return callee_method;
1681 }
1682
1683 //
1684 // Resets a call-site in compiled code so it will get resolved again.
1685 // This routine handles virtual call sites, optimized virtual call
1686 // sites, and static call sites. It is typically used to change a call
1687 // site's destination from compiled to interpreted.
1688 //
1689 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1690 JavaThread* current = THREAD;
1691 ResourceMark rm(current);
1692 RegisterMap reg_map(current,
1693 RegisterMap::UpdateMap::skip,
1694 RegisterMap::ProcessFrames::include,
1695 RegisterMap::WalkContinuation::skip);
1696 frame stub_frame = current->last_frame();
1697 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1698   frame caller = stub_frame.sender(&reg_map);
1699
1700 // Do nothing if the frame isn't a live compiled frame.
1701 // nmethod could be deoptimized by the time we get here
1702 // so no update to the caller is needed.
1703
1704 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1705 (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1706
1707 address pc = caller.pc();
1708
1709 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1710 assert(caller_nm != nullptr, "did not find caller nmethod");
1711
1712 // Default call_addr is the location of the "basic" call.
1713     // Determine the address of the call we are re-resolving. With
1714 // Inline Caches we will always find a recognizable call.
1715 // With Inline Caches disabled we may or may not find a
1716 // recognizable call. We will always find a call for static
1717 // calls and for optimized virtual calls. For vanilla virtual
1718 // calls it depends on the state of the UseInlineCaches switch.
1719 //
1720 // With Inline Caches disabled we can get here for a virtual call
1721 // for two reasons:
1722 // 1 - calling an abstract method. The vtable for abstract methods
1723 // will run us thru handle_wrong_method and we will eventually
1724     //     end up in the interpreter to throw the AbstractMethodError.
1725 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1726 // call and between the time we fetch the entry address and
1727 // we jump to it the target gets deoptimized. Similar to 1
1728     //     we will wind up in the interpreter (thru a c2i with c2).
1729 //
1730 CompiledICLocker ml(caller_nm);
1731 address call_addr = caller_nm->call_instruction_address(pc);
1732
1733 if (call_addr != nullptr) {
1734 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1735 // bytes back in the instruction stream so we must also check for reloc info.
1736 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1737 bool ret = iter.next(); // Get item
1738 if (ret) {
1739 switch (iter.type()) {
1740 case relocInfo::static_call_type:
1741 case relocInfo::opt_virtual_call_type: {
1742 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1743 cdc->set_to_clean();
1744 break;
1745 }
1746
1747 case relocInfo::virtual_call_type: {
1748 // compiled, dispatched call (which used to call an interpreted method)
1749 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1750 inline_cache->set_to_clean();
1751 break;
1752 }
1753 default:
1754 break;
1755 }
1756 }
1757 }
1758 }
1759
1760 methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1761
1762
1763 #ifndef PRODUCT
1764 AtomicAccess::inc(&_wrong_method_ctr);
1765
1766 if (TraceCallFixup) {
1767 ResourceMark rm(current);
1768 tty->print("handle_wrong_method reresolving call to");
1769 callee_method->print_short_name(tty);
1770 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1771 }
1772 #endif
1773
1774 return callee_method;
1775 }
1776
1777 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1778 // The faulting unsafe accesses should be changed to throw the error
1779 // synchronously instead. Meanwhile the faulting instruction will be
1780 // skipped over (effectively turning it into a no-op) and an
1781 // asynchronous exception will be raised which the thread will
1782 // handle at a later point. If the instruction is a load it will
1783 // return garbage.
1784
1785 // Request an async exception.
1786 thread->set_pending_unsafe_access_error();
1787
1788 // Return address of next instruction to execute.
1789 return next_pc;
1790 }
1791
1792 #ifdef ASSERT
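// Debug-only sanity check for method handle linker calls: the MemberName is
// passed as the trailing argument, and dropping it must not change how the
// preceding arguments are assigned registers and stack slots.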
1793 void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
1794 const BasicType* sig_bt,
1795 const VMRegPair* regs) {
1796 ResourceMark rm;
1797 const int total_args_passed = method->size_of_parameters();
1798 const VMRegPair* regs_with_member_name = regs;
1799 VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
1800
1801 const int member_arg_pos = total_args_passed - 1;
1802 assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1803 assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1804
1805 java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
1806
1807 for (int i = 0; i < member_arg_pos; i++) {
1808 VMReg a = regs_with_member_name[i].first();
1809 VMReg b = regs_without_member_name[i].first();
1810 assert(a->value() == b->value(), "register allocation mismatch: a= %d, b= %d", a->value(), b->value());
1811 }
1812 assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
1813 }
1814 #endif
1815
1816 // ---------------------------------------------------------------------------
1817 // We are calling the interpreter via a c2i. Normally this would mean that
1818 // we were called by a compiled method. However we could have lost a race
1819 // where we went int -> i2c -> c2i and so the caller could in fact be
1820 // interpreted. If the caller is compiled we attempt to patch the caller
1821 // so it no longer calls into the interpreter.
1822 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1823 AARCH64_PORT_ONLY(assert(pauth_ptr_is_raw(caller_pc), "should be raw"));
1824
1825 // It's possible that deoptimization can occur at a call site which hasn't
1826 // been resolved yet, in which case this function will be called from
1827 // an nmethod that has been patched for deopt and we can ignore the
1828 // request for a fixup.
1829   // Also, it is possible that we lost a race in which from_compiled_entry
1830   // is now back to the i2c; in that case we don't need to patch, and if
1831   // we did we'd leap into space because the call site needs to use the
1832   // "to interpreter" stub in order to load up the Method*. Don't
1833   // ask me how I know this...
1834
1835 // Result from nmethod::is_unloading is not stable across safepoints.
1836 NoSafepointVerifier nsv;
1837
1838 nmethod* callee = method->code();
1839 if (callee == nullptr) {
1840 return;
1841 }
1842
1843 // write lock needed because we might patch call site by set_to_clean()
1844 // and is_unloading() can modify nmethod's state
1845 MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, JavaThread::current()));
1846
1847 CodeBlob* cb = CodeCache::find_blob(caller_pc);
1848 if (cb == nullptr || !cb->is_nmethod() || !callee->is_in_use() || callee->is_unloading()) {
1849 return;
1850 }
1851
1852 // The check above makes sure this is an nmethod.
1853 nmethod* caller = cb->as_nmethod();
1854
1855 // Get the return PC for the passed caller PC.
1856 address return_pc = caller_pc + frame::pc_return_offset;
1857
1858 if (!caller->is_in_use() || !NativeCall::is_call_before(return_pc)) {
1859 return;
1860 }
1861
1862   // Expect to find a native call there (unless it was a vtable dispatch with no inline cache)
1863 CompiledICLocker ic_locker(caller);
1864 ResourceMark rm;
1865
1866 // If we got here through a static call or opt_virtual call, then we know where the
1867 // call address would be; let's peek at it
1868 address callsite_addr = (address)nativeCall_before(return_pc);
1869 RelocIterator iter(caller, callsite_addr, callsite_addr + 1);
1870 if (!iter.next()) {
1871 // No reloc entry found; not a static or optimized virtual call
1872 return;
1873 }
1874
1875 relocInfo::relocType type = iter.reloc()->type();
1876 if (type != relocInfo::static_call_type &&
1877 type != relocInfo::opt_virtual_call_type) {
1878 return;
1879 }
1880
1881 CompiledDirectCall* callsite = CompiledDirectCall::before(return_pc);
1882 callsite->set_to_clean();
1883 JRT_END
1884
1885
1886 // same as JVM_Arraycopy, but called directly from compiled code
1887 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
1888 oopDesc* dest, jint dest_pos,
1889 jint length,
1890 JavaThread* current)) {
1891 #ifndef PRODUCT
1892 _slow_array_copy_ctr++;
1893 #endif
1894 // Check if we have null pointers
1895 if (src == nullptr || dest == nullptr) {
1896 THROW(vmSymbols::java_lang_NullPointerException());
1897 }
1898 // Do the copy. The casts to arrayOop are necessary to the copy_array API,
1899 // even though the copy_array API also performs dynamic checks to ensure
1900 // that src and dest are truly arrays (and are conformable).
1901 // The copy_array mechanism is awkward and could be removed, but
1902 // the compilers don't call this function except as a last resort,
1903 // so it probably doesn't matter.
1904 src->klass()->copy_array((arrayOopDesc*)src, src_pos,
1905 (arrayOopDesc*)dest, dest_pos,
1906 length, current);
1907 }
1908 JRT_END
1909
1910 // The caller of generate_class_cast_message() (or one of its callers)
1911 // must use a ResourceMark in order to correctly free the result.
1912 char* SharedRuntime::generate_class_cast_message(
1913 JavaThread* thread, Klass* caster_klass) {
1914
1915 // Get target class name from the checkcast instruction
1916 vframeStream vfst(thread, true);
1917 assert(!vfst.at_end(), "Java frame must exist");
1918 Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
1919 constantPoolHandle cpool(thread, vfst.method()->constants());
1920 Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index());
1921 Symbol* target_klass_name = nullptr;
1922 if (target_klass == nullptr) {
1923 // This klass should be resolved, but just in case, get the name in the klass slot.
1924 target_klass_name = cpool->klass_name_at(cc.index());
1925 }
1926 return generate_class_cast_message(caster_klass, target_klass, target_klass_name);
1927 }
1928
1929
1930 // The caller of generate_class_cast_message() (or one of its callers)
1931 // must use a ResourceMark in order to correctly free the result.
1932 char* SharedRuntime::generate_class_cast_message(
1933 Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) {
1934 const char* caster_name = caster_klass->external_name();
1935
1936 assert(target_klass != nullptr || target_klass_name != nullptr, "one must be provided");
1937 const char* target_name = target_klass == nullptr ? target_klass_name->as_klass_external_name() :
1938 target_klass->external_name();
1939
1940 size_t msglen = strlen(caster_name) + strlen("class ") + strlen(" cannot be cast to class ") + strlen(target_name) + 1;
1941
1942 const char* caster_klass_description = "";
1943 const char* target_klass_description = "";
1944 const char* klass_separator = "";
1945 if (target_klass != nullptr && caster_klass->module() == target_klass->module()) {
1946 caster_klass_description = caster_klass->joint_in_module_of_loader(target_klass);
1947 } else {
1948 caster_klass_description = caster_klass->class_in_module_of_loader();
1949 target_klass_description = (target_klass != nullptr) ? target_klass->class_in_module_of_loader() : "";
1950 klass_separator = (target_klass != nullptr) ? "; " : "";
1951 }
1952
1953   // add 3 for the parentheses and preceding space
1954 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1955
1956 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1957 if (message == nullptr) {
1958 // Shouldn't happen, but don't cause even more problems if it does
1959 message = const_cast<char*>(caster_klass->external_name());
1960 } else {
1961 jio_snprintf(message,
1962 msglen,
1963 "class %s cannot be cast to class %s (%s%s%s)",
1964 caster_name,
1965 target_name,
1966 caster_klass_description,
1967 klass_separator,
1968 target_klass_description
1969 );
1970 }
1971 return message;
1972 }
1973
1974 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1975 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
1976 JRT_END
1977
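// Compiled-code slow path for monitorenter: try ObjectSynchronizer::quick_enter()
// first (unless a safepoint is in progress), then fall back to the full
// ObjectSynchronizer::enter() under a no-async-exception block.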
1978 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
1979 if (!SafepointSynchronize::is_synchronizing()) {
1980 // Only try quick_enter() if we're not trying to reach a safepoint
1981 // so that the calling thread reaches the safepoint more quickly.
1982 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
1983 return;
1984 }
1985 }
1986 // NO_ASYNC required because an async exception on the state transition destructor
1987 // would leave you with the lock held and it would never be released.
1988 // The normal monitorenter NullPointerException is thrown without acquiring a lock
1989 // and the model is that an exception implies the method failed.
1990 JRT_BLOCK_NO_ASYNC
1991 Handle h_obj(THREAD, obj);
1992 ObjectSynchronizer::enter(h_obj, lock, current);
1993 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
1994 JRT_BLOCK_END
1995 }
1996
1997 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
1998 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
1999 SharedRuntime::monitor_enter_helper(obj, lock, current);
2000 JRT_END
2001
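// Compiled-code slow path for monitorexit. Must not block or throw: the
// compiled fast path may already have unlocked an inflated monitor, which is
// handled below before calling ObjectSynchronizer::exit().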
2002 void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2003 assert(JavaThread::current() == current, "invariant");
2004 // Exit must be non-blocking, and therefore no exceptions can be thrown.
2005 ExceptionMark em(current);
2006
2007   // Check if the compiled fast-unlock path
2008   // (C2_MacroAssembler::fast_unlock()) unlocked an inflated
2009 // monitor before going slow path. Since there is no safepoint
2010 // polling when calling into the VM, we can be sure that the monitor
2011 // hasn't been deallocated.
2012 ObjectMonitor* m = current->unlocked_inflated_monitor();
2013 if (m != nullptr) {
2014 assert(!m->has_owner(current), "must be");
2015 current->clear_unlocked_inflated_monitor();
2016
2017 // We need to reacquire the lock before we can call ObjectSynchronizer::exit().
2018 if (!m->try_enter(current, /*check_for_recursion*/ false)) {
2019 // Some other thread acquired the lock (or the monitor was
2020 // deflated). Either way we are done.
2021 return;
2022 }
2023 }
2024
2025 // The object could become unlocked through a JNI call, which we have no other checks for.
2026 // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
2027 if (obj->is_unlocked()) {
2028 if (CheckJNICalls) {
2029 fatal("Object has been unlocked by JNI");
2030 }
2031 return;
2032 }
2033 ObjectSynchronizer::exit(obj, lock, current);
2034 }
2035
2036 // Handles the uncommon cases of monitor unlocking in compiled code
2037 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2038 assert(current == JavaThread::current(), "pre-condition");
2039 SharedRuntime::monitor_exit_helper(obj, lock, current);
2040 JRT_END
2041
2042 #ifndef PRODUCT
2043
2044 void SharedRuntime::print_statistics() {
2045 ttyLocker ttyl;
2046 if (xtty != nullptr) xtty->head("statistics type='SharedRuntime'");
2047
2048 SharedRuntime::print_ic_miss_histogram();
2049
2050 // Dump the JRT_ENTRY counters
2051 if (_new_instance_ctr) tty->print_cr("%5u new instance requires GC", _new_instance_ctr);
2052 if (_new_array_ctr) tty->print_cr("%5u new array requires GC", _new_array_ctr);
2053 if (_multi2_ctr) tty->print_cr("%5u multianewarray 2 dim", _multi2_ctr);
2054 if (_multi3_ctr) tty->print_cr("%5u multianewarray 3 dim", _multi3_ctr);
2055 if (_multi4_ctr) tty->print_cr("%5u multianewarray 4 dim", _multi4_ctr);
2056 if (_multi5_ctr) tty->print_cr("%5u multianewarray 5 dim", _multi5_ctr);
2057
2058 tty->print_cr("%5u inline cache miss in compiled", _ic_miss_ctr);
2059 tty->print_cr("%5u wrong method", _wrong_method_ctr);
2060 tty->print_cr("%5u unresolved static call site", _resolve_static_ctr);
2061 tty->print_cr("%5u unresolved virtual call site", _resolve_virtual_ctr);
2062 tty->print_cr("%5u unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2063
2064 if (_mon_enter_stub_ctr) tty->print_cr("%5u monitor enter stub", _mon_enter_stub_ctr);
2065 if (_mon_exit_stub_ctr) tty->print_cr("%5u monitor exit stub", _mon_exit_stub_ctr);
2066 if (_mon_enter_ctr) tty->print_cr("%5u monitor enter slow", _mon_enter_ctr);
2067 if (_mon_exit_ctr) tty->print_cr("%5u monitor exit slow", _mon_exit_ctr);
2068 if (_partial_subtype_ctr) tty->print_cr("%5u slow partial subtype", _partial_subtype_ctr);
2069 if (_jbyte_array_copy_ctr) tty->print_cr("%5u byte array copies", _jbyte_array_copy_ctr);
2070 if (_jshort_array_copy_ctr) tty->print_cr("%5u short array copies", _jshort_array_copy_ctr);
2071 if (_jint_array_copy_ctr) tty->print_cr("%5u int array copies", _jint_array_copy_ctr);
2072 if (_jlong_array_copy_ctr) tty->print_cr("%5u long array copies", _jlong_array_copy_ctr);
2073 if (_oop_array_copy_ctr) tty->print_cr("%5u oop array copies", _oop_array_copy_ctr);
2074 if (_checkcast_array_copy_ctr) tty->print_cr("%5u checkcast array copies", _checkcast_array_copy_ctr);
2075 if (_unsafe_array_copy_ctr) tty->print_cr("%5u unsafe array copies", _unsafe_array_copy_ctr);
2076 if (_generic_array_copy_ctr) tty->print_cr("%5u generic array copies", _generic_array_copy_ctr);
2077 if (_slow_array_copy_ctr) tty->print_cr("%5u slow array copies", _slow_array_copy_ctr);
2078 if (_find_handler_ctr) tty->print_cr("%5u find exception handler", _find_handler_ctr);
2079 if (_rethrow_ctr) tty->print_cr("%5u rethrow handler", _rethrow_ctr);
2080 if (_unsafe_set_memory_ctr) tty->print_cr("%5u unsafe set memorys", _unsafe_set_memory_ctr);
2081
2082 AdapterHandlerLibrary::print_statistics();
2083
2084 if (xtty != nullptr) xtty->tail("statistics");
2085 }
2086
2087 inline double percent(int64_t x, int64_t y) {
2088 return 100.0 * (double)x / (double)MAX2(y, (int64_t)1);
2089 }
2090
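// Non-product helper used by print_call_statistics(): walks all nmethods once
// (under Compile_lock and CodeCache_lock) and prints histograms of call arity
// and of parameter block size, weighted by each method's compiled invocation count.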
2091 class MethodArityHistogram {
2092 public:
2093 enum { MAX_ARITY = 256 };
2094 private:
2095 static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args
2096 static uint64_t _size_histogram[MAX_ARITY]; // histogram of arg size in words
2097 static uint64_t _total_compiled_calls;
2098 static uint64_t _max_compiled_calls_per_method;
2099 static int _max_arity; // max. arity seen
2100 static int _max_size; // max. arg size seen
2101
2102 static void add_method_to_histogram(nmethod* nm) {
2103 Method* method = (nm == nullptr) ? nullptr : nm->method();
2104 if (method != nullptr) {
2105 ArgumentCount args(method->signature());
2106 int arity = args.size() + (method->is_static() ? 0 : 1);
2107 int argsize = method->size_of_parameters();
2108 arity = MIN2(arity, MAX_ARITY-1);
2109 argsize = MIN2(argsize, MAX_ARITY-1);
2110 uint64_t count = (uint64_t)method->compiled_invocation_count();
2111 _max_compiled_calls_per_method = count > _max_compiled_calls_per_method ? count : _max_compiled_calls_per_method;
2112 _total_compiled_calls += count;
2113 _arity_histogram[arity] += count;
2114 _size_histogram[argsize] += count;
2115 _max_arity = MAX2(_max_arity, arity);
2116 _max_size = MAX2(_max_size, argsize);
2117 }
2118 }
2119
2120 void print_histogram_helper(int n, uint64_t* histo, const char* name) {
2121 const int N = MIN2(9, n);
2122 double sum = 0;
2123 double weighted_sum = 0;
2124 for (int i = 0; i <= n; i++) { sum += (double)histo[i]; weighted_sum += (double)(i*histo[i]); }
2125 if (sum >= 1) { // prevent divide by zero or divide overflow
2126 double rest = sum;
2127 double percent = sum / 100;
2128 for (int i = 0; i <= N; i++) {
2129 rest -= (double)histo[i];
2130 tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], (double)histo[i] / percent);
2131 }
2132 tty->print_cr("rest: " INT64_FORMAT_W(12) " (%5.1f%%)", (int64_t)rest, rest / percent);
2133 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2134 tty->print_cr("(total # of compiled calls = " INT64_FORMAT_W(14) ")", _total_compiled_calls);
2135 tty->print_cr("(max # of compiled calls = " INT64_FORMAT_W(14) ")", _max_compiled_calls_per_method);
2136 } else {
2137 tty->print_cr("Histogram generation failed for %s. n = %d, sum = %7.5f", name, n, sum);
2138 }
2139 }
2140
2141 void print_histogram() {
2142 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2143 print_histogram_helper(_max_arity, _arity_histogram, "arity");
2144 tty->print_cr("\nHistogram of parameter block size (in words, incl. rcvr):");
2145 print_histogram_helper(_max_size, _size_histogram, "size");
2146 tty->cr();
2147 }
2148
2149 public:
2150 MethodArityHistogram() {
2151 // Take the Compile_lock to protect against changes in the CodeBlob structures
2152 MutexLocker mu1(Compile_lock, Mutex::_safepoint_check_flag);
2153 // Take the CodeCache_lock to protect against changes in the CodeHeap structure
2154 MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2155 _max_arity = _max_size = 0;
2156 _total_compiled_calls = 0;
2157 _max_compiled_calls_per_method = 0;
2158 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2159 CodeCache::nmethods_do(add_method_to_histogram);
2160 print_histogram();
2161 }
2162 };
2163
2164 uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2165 uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2166 uint64_t MethodArityHistogram::_total_compiled_calls;
2167 uint64_t MethodArityHistogram::_max_compiled_calls_per_method;
2168 int MethodArityHistogram::_max_arity;
2169 int MethodArityHistogram::_max_size;
2170
2171 void SharedRuntime::print_call_statistics(uint64_t comp_total) {
2172 tty->print_cr("Calls from compiled code:");
2173 int64_t total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2174 int64_t mono_c = _nof_normal_calls - _nof_megamorphic_calls;
2175 int64_t mono_i = _nof_interface_calls;
2176 tty->print_cr("\t" INT64_FORMAT_W(12) " (100%%) total non-inlined ", total);
2177 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total));
2178 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2179 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_c, percent(mono_c, _nof_normal_calls));
2180 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2181 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total));
2182 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2183 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_i, percent(mono_i, _nof_interface_calls));
2184 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2185 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2186 tty->cr();
2187 tty->print_cr("Note 1: counter updates are not MT-safe.");
2188 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2189 tty->print_cr(" %% in nested categories are relative to their category");
2190 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2191 tty->cr();
2192
2193 MethodArityHistogram h;
2194 }
2195 #endif
2196
2197 #ifndef PRODUCT
2198 static int _lookups; // number of calls to lookup
2199 static int _equals; // number of buckets checked with matching hash
2200 static int _archived_hits; // number of successful lookups in archived table
2201 static int _runtime_hits; // number of successful lookups in runtime table
2202 #endif
2203
2204 // A simple wrapper class around the calling convention information
2205 // that allows sharing of adapters for the same calling convention.
2206 class AdapterFingerPrint : public MetaspaceObj {
2207 private:
2208 enum {
2209 _basic_type_bits = 4,
2210 _basic_type_mask = right_n_bits(_basic_type_bits),
2211 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2212 };
2213 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2214   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
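  //
  // Illustrative example (LP64): for a virtual method with signature (ILjava/lang/String;D)V
  // the expanded argument types are
  //   T_OBJECT (receiver), T_INT, T_OBJECT, T_DOUBLE, T_VOID (filler slot)
  // which adapter_encoding() remaps to
  //   T_LONG, T_INT, T_LONG, T_DOUBLE, T_VOID
  // before being packed eight entries per int, first argument in the most
  // significant occupied nibble.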
2215
2216 int _length;
2217
2218 static int data_offset() { return sizeof(AdapterFingerPrint); }
2219 int* data_pointer() {
2220 return (int*)((address)this + data_offset());
2221 }
2222
2223   // Private constructor. Use allocate() to get an instance.
2224 AdapterFingerPrint(int total_args_passed, BasicType* sig_bt, int len) {
2225 int* data = data_pointer();
2226 // Pack the BasicTypes with 8 per int
2227 assert(len == length(total_args_passed), "sanity");
2228 _length = len;
2229 int sig_index = 0;
2230 for (int index = 0; index < _length; index++) {
2231 int value = 0;
2232 for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2233 int bt = adapter_encoding(sig_bt[sig_index++]);
2234 assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2235 value = (value << _basic_type_bits) | bt;
2236 }
2237 data[index] = value;
2238 }
2239 }
2240
2241 // Call deallocate instead
2242 ~AdapterFingerPrint() {
2243 ShouldNotCallThis();
2244 }
2245
2246 static int length(int total_args) {
2247 return (total_args + (_basic_types_per_int-1)) / _basic_types_per_int;
2248 }
2249
2250 static int compute_size_in_words(int len) {
2251 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(int)));
2252 }
2253
2254 // Remap BasicTypes that are handled equivalently by the adapters.
2255 // These are correct for the current system but someday it might be
2256 // necessary to make this mapping platform dependent.
2257 static int adapter_encoding(BasicType in) {
2258 switch (in) {
2259 case T_BOOLEAN:
2260 case T_BYTE:
2261 case T_SHORT:
2262 case T_CHAR:
2263         // These are all promoted to T_INT in the calling convention
2264 return T_INT;
2265
2266 case T_OBJECT:
2267 case T_ARRAY:
2268 // In other words, we assume that any register good enough for
2269 // an int or long is good enough for a managed pointer.
2270 #ifdef _LP64
2271 return T_LONG;
2272 #else
2273 return T_INT;
2274 #endif
2275
2276 case T_INT:
2277 case T_LONG:
2278 case T_FLOAT:
2279 case T_DOUBLE:
2280 case T_VOID:
2281 return in;
2282
2283 default:
2284 ShouldNotReachHere();
2285 return T_CONFLICT;
2286 }
2287 }
2288
2289 void* operator new(size_t size, size_t fp_size) throw() {
2290 assert(fp_size >= size, "sanity check");
2291 void* p = AllocateHeap(fp_size, mtCode);
2292 memset(p, 0, fp_size);
2293 return p;
2294 }
2295
2296 template<typename Function>
2297 void iterate_args(Function function) {
2298 for (int i = 0; i < length(); i++) {
2299 unsigned val = (unsigned)value(i);
2300 // args are packed so that first/lower arguments are in the highest
2301 // bits of each int value, so iterate from highest to the lowest
2302 for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2303 unsigned v = (val >> j) & _basic_type_mask;
2304 if (v == 0) {
2305 continue;
2306 }
2307 function(v);
2308 }
2309 }
2310 }
2311
2312 public:
2313 static AdapterFingerPrint* allocate(int total_args_passed, BasicType* sig_bt) {
2314 int len = length(total_args_passed);
2315 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2316 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(total_args_passed, sig_bt, len);
2317 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2318 return afp;
2319 }
2320
2321 static void deallocate(AdapterFingerPrint* fp) {
2322 FreeHeap(fp);
2323 }
2324
2325 int value(int index) {
2326 int* data = data_pointer();
2327 return data[index];
2328 }
2329
2330 int length() {
2331 return _length;
2332 }
2333
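  // Shift-and-xor mix over the packed words; used as the key hash for both the
  // runtime AdapterHandlerTable and the archived (AOT) adapter table.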
2334 unsigned int compute_hash() {
2335 int hash = 0;
2336 for (int i = 0; i < length(); i++) {
2337 int v = value(i);
2338       // Add an arithmetic operation to the hash, like +3, to improve hashing
2339 hash = ((hash << 8) ^ v ^ (hash >> 5)) + 3;
2340 }
2341 return (unsigned int)hash;
2342 }
2343
2344 const char* as_string() {
2345 stringStream st;
2346 st.print("0x");
2347 for (int i = 0; i < length(); i++) {
2348 st.print("%x", value(i));
2349 }
2350 return st.as_string();
2351 }
2352
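  // Render the fingerprint as a compact letter string, e.g. "IJD" for an
  // (int, long, double) signature. Objects and longs share an encoding on
  // 64-bit platforms, so a T_LONG entry prints as "J" only when it is followed
  // by its T_VOID filler slot, and as "L" otherwise.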
2353 const char* as_basic_args_string() {
2354 stringStream st;
2355 bool long_prev = false;
2356 iterate_args([&] (int arg) {
2357 if (long_prev) {
2358 long_prev = false;
2359 if (arg == T_VOID) {
2360 st.print("J");
2361 } else {
2362 st.print("L");
2363 }
2364 }
2365 switch (arg) {
2366 case T_INT: st.print("I"); break;
2367 case T_LONG: long_prev = true; break;
2368 case T_FLOAT: st.print("F"); break;
2369 case T_DOUBLE: st.print("D"); break;
2370 case T_VOID: break;
2371 default: ShouldNotReachHere();
2372 }
2373 });
2374 if (long_prev) {
2375 st.print("L");
2376 }
2377 return st.as_string();
2378 }
2379
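  // Reconstruct a BasicType array from the packed fingerprint -- roughly the
  // inverse of the constructor, except that arrays come back as T_OBJECT and
  // sub-int types come back as T_INT. The ASSERT block below checks that the
  // round trip yields an equal fingerprint.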
2380 BasicType* as_basic_type(int& nargs) {
2381 nargs = 0;
2382 GrowableArray<BasicType> btarray;
2383 bool long_prev = false;
2384
2385 iterate_args([&] (int arg) {
2386 if (long_prev) {
2387 long_prev = false;
2388 if (arg == T_VOID) {
2389 btarray.append(T_LONG);
2390 } else {
2391 btarray.append(T_OBJECT); // it could be T_ARRAY; it shouldn't matter
2392 }
2393 }
2394 switch (arg) {
2395 case T_INT: // fallthrough
2396 case T_FLOAT: // fallthrough
2397 case T_DOUBLE:
2398 case T_VOID:
2399 btarray.append((BasicType)arg);
2400 break;
2401 case T_LONG:
2402 long_prev = true;
2403 break;
2404 default: ShouldNotReachHere();
2405 }
2406 });
2407
2408 if (long_prev) {
2409 btarray.append(T_OBJECT);
2410 }
2411
2412 nargs = btarray.length();
2413 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nargs);
2414 int index = 0;
2415 GrowableArrayIterator<BasicType> iter = btarray.begin();
2416 while (iter != btarray.end()) {
2417 sig_bt[index++] = *iter;
2418 ++iter;
2419 }
2420 assert(index == btarray.length(), "sanity check");
2421 #ifdef ASSERT
2422 {
2423 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(nargs, sig_bt);
2424 assert(this->equals(compare_fp), "sanity check");
2425 AdapterFingerPrint::deallocate(compare_fp);
2426 }
2427 #endif
2428 return sig_bt;
2429 }
2430
2431 bool equals(AdapterFingerPrint* other) {
2432 if (other->_length != _length) {
2433 return false;
2434 } else {
2435 for (int i = 0; i < _length; i++) {
2436 if (value(i) != other->value(i)) {
2437 return false;
2438 }
2439 }
2440 }
2441 return true;
2442 }
2443
2444 // methods required by virtue of being a MetaspaceObj
2445 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2446 int size() const { return compute_size_in_words(_length); }
2447 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2448
2449 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2450 NOT_PRODUCT(_equals++);
2451 return fp1->equals(fp2);
2452 }
2453
2454 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2455 return fp->compute_hash();
2456 }
2457 };
2458
2459 #if INCLUDE_CDS
2460 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2461 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2462 }
2463
2464 class ArchivedAdapterTable : public OffsetCompactHashtable<
2465 AdapterFingerPrint*,
2466 AdapterHandlerEntry*,
2467 adapter_fp_equals_compact_hashtable_entry> {};
2468 #endif // INCLUDE_CDS
2469
2470 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2471 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2472 AnyObj::C_HEAP, mtCode,
2473 AdapterFingerPrint::compute_hash,
2474 AdapterFingerPrint::equals>;
2475 static AdapterHandlerTable* _adapter_handler_table;
2476 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2477
2478 // Find an entry with the same fingerprint, if one exists
2479 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(int total_args_passed, BasicType* sig_bt) {
2480 NOT_PRODUCT(_lookups++);
2481 assert_lock_strong(AdapterHandlerLibrary_lock);
2482 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2483 AdapterHandlerEntry* entry = nullptr;
2484 #if INCLUDE_CDS
2485 // if we are building the archive then the archived adapter table is
2486 // not valid and we need to use the ones added to the runtime table
2487 if (AOTCodeCache::is_using_adapter()) {
2488     // Search the archived table first. It is a read-only table, so it can be searched without a lock.
2489 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2490 #ifndef PRODUCT
2491 if (entry != nullptr) {
2492 _archived_hits++;
2493 }
2494 #endif
2495 }
2496 #endif // INCLUDE_CDS
2497 if (entry == nullptr) {
2498 assert_lock_strong(AdapterHandlerLibrary_lock);
2499 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2500 if (entry_p != nullptr) {
2501 entry = *entry_p;
2502 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2503 entry->fingerprint()->as_basic_args_string(), entry->fingerprint()->as_string(), entry->fingerprint()->compute_hash(),
2504 fp->as_basic_args_string(), fp->as_string(), fp->compute_hash());
2505 #ifndef PRODUCT
2506 _runtime_hits++;
2507 #endif
2508 }
2509 }
2510 AdapterFingerPrint::deallocate(fp);
2511 return entry;
2512 }
2513
2514 #ifndef PRODUCT
2515 static void print_table_statistics() {
2516 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2517 return sizeof(*key) + sizeof(*a);
2518 };
2519 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2520 ts.print(tty, "AdapterHandlerTable");
2521 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2522 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2523 int total_hits = _archived_hits + _runtime_hits;
2524 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2525 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2526 }
2527 #endif
2528
2529 // ---------------------------------------------------------------------------
2530 // Implementation of AdapterHandlerLibrary
2531 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2532 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2533 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2534 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2535 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2536 #if INCLUDE_CDS
2537 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2538 #endif // INCLUDE_CDS
2539 static const int AdapterHandlerLibrary_size = 16*K;
2540 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2541 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2542
2543 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2544 assert(_buffer != nullptr, "should be initialized");
2545 return _buffer;
2546 }
2547
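// Notify profilers about a freshly generated adapter: register the blob with
// Forte and/or post a JVMTI DYNAMIC_CODE_GENERATED event, using a name built
// from the blob name and the adapter fingerprint.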
2548 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2549 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2550 AdapterBlob* adapter_blob = entry->adapter_blob();
2551 char blob_id[256];
2552 jio_snprintf(blob_id,
2553 sizeof(blob_id),
2554 "%s(%s)",
2555 adapter_blob->name(),
2556 entry->fingerprint()->as_string());
2557 if (Forte::is_enabled()) {
2558 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2559 }
2560
2561 if (JvmtiExport::should_post_dynamic_code_generated()) {
2562 JvmtiExport::post_dynamic_code_generated(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2563 }
2564 }
2565 }
2566
2567 void AdapterHandlerLibrary::initialize() {
2568 {
2569 ResourceMark rm;
2570 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2571 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2572 }
2573
2574 #if INCLUDE_CDS
2575 // Link adapters in AOT Cache to their code in AOT Code Cache
2576 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2577 link_aot_adapters();
2578 lookup_simple_adapters();
2579 return;
2580 }
2581 #endif // INCLUDE_CDS
2582
2583 ResourceMark rm;
2584 {
2585 MutexLocker mu(AdapterHandlerLibrary_lock);
2586
2587 _no_arg_handler = create_adapter(0, nullptr);
2588
2589 BasicType obj_args[] = { T_OBJECT };
2590 _obj_arg_handler = create_adapter(1, obj_args);
2591
2592 BasicType int_args[] = { T_INT };
2593 _int_arg_handler = create_adapter(1, int_args);
2594
2595 BasicType obj_int_args[] = { T_OBJECT, T_INT };
2596 _obj_int_arg_handler = create_adapter(2, obj_int_args);
2597
2598 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2599 _obj_obj_arg_handler = create_adapter(2, obj_obj_args);
2600
2601 // we should always get an entry back but we don't have any
2602 // associated blob on Zero
2603 assert(_no_arg_handler != nullptr &&
2604 _obj_arg_handler != nullptr &&
2605 _int_arg_handler != nullptr &&
2606 _obj_int_arg_handler != nullptr &&
2607 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2608 }
2609
2610 // Outside of the lock
2611 #ifndef ZERO
2612 // no blobs to register when we are on Zero
2613 post_adapter_creation(_no_arg_handler);
2614 post_adapter_creation(_obj_arg_handler);
2615 post_adapter_creation(_int_arg_handler);
2616 post_adapter_creation(_obj_int_arg_handler);
2617 post_adapter_creation(_obj_obj_arg_handler);
2618 #endif // ZERO
2619 }
2620
2621 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2622 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2623 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2624 return AdapterHandlerEntry::allocate(id, fingerprint);
2625 }
2626
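// Fast path that maps trivially shaped signatures onto the pre-generated
// handlers created in initialize(): no args, (int), (object), (object, int)
// and (object, object), where the receiver of a non-static method counts as
// the leading object. Returns nullptr when a custom adapter is required.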
2627 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2628 int total_args_passed = method->size_of_parameters(); // All args on stack
2629 if (total_args_passed == 0) {
2630 return _no_arg_handler;
2631 } else if (total_args_passed == 1) {
2632 if (!method->is_static()) {
2633 return _obj_arg_handler;
2634 }
2635 switch (method->signature()->char_at(1)) {
2636 case JVM_SIGNATURE_CLASS:
2637 case JVM_SIGNATURE_ARRAY:
2638 return _obj_arg_handler;
2639 case JVM_SIGNATURE_INT:
2640 case JVM_SIGNATURE_BOOLEAN:
2641 case JVM_SIGNATURE_CHAR:
2642 case JVM_SIGNATURE_BYTE:
2643 case JVM_SIGNATURE_SHORT:
2644 return _int_arg_handler;
2645 }
2646 } else if (total_args_passed == 2 &&
2647 !method->is_static()) {
2648 switch (method->signature()->char_at(1)) {
2649 case JVM_SIGNATURE_CLASS:
2650 case JVM_SIGNATURE_ARRAY:
2651 return _obj_obj_arg_handler;
2652 case JVM_SIGNATURE_INT:
2653 case JVM_SIGNATURE_BOOLEAN:
2654 case JVM_SIGNATURE_CHAR:
2655 case JVM_SIGNATURE_BYTE:
2656 case JVM_SIGNATURE_SHORT:
2657 return _obj_int_arg_handler;
2658 }
2659 }
2660 return nullptr;
2661 }
2662
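// Expands a method signature into the BasicType array used by the calling
// convention code: a leading T_OBJECT is added for the receiver of non-static
// methods, and each long/double is followed by a T_VOID filler because those
// types occupy two Java argument slots.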
2663 class AdapterSignatureIterator : public SignatureIterator {
2664 private:
2665 BasicType stack_sig_bt[16];
2666 BasicType* sig_bt;
2667 int index;
2668
2669 public:
2670 AdapterSignatureIterator(Symbol* signature,
2671 fingerprint_t fingerprint,
2672 bool is_static,
2673 int total_args_passed) :
2674 SignatureIterator(signature, fingerprint),
2675 index(0)
2676 {
2677 sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2678 if (!is_static) { // Pass in receiver first
2679 sig_bt[index++] = T_OBJECT;
2680 }
2681 do_parameters_on(this);
2682 }
2683
2684 BasicType* basic_types() {
2685 return sig_bt;
2686 }
2687
2688 #ifdef ASSERT
2689 int slots() {
2690 return index;
2691 }
2692 #endif
2693
2694 private:
2695
2696 friend class SignatureIterator; // so do_parameters_on can call do_type
2697 void do_type(BasicType type) {
2698 sig_bt[index++] = type;
2699 if (type == T_LONG || type == T_DOUBLE) {
2700 sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2701 }
2702 }
2703 };
2704
2705
2706 const char* AdapterHandlerEntry::_entry_names[] = {
2707 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
2708 };
2709
2710 #ifdef ASSERT
2711 void AdapterHandlerLibrary::verify_adapter_sharing(int total_args_passed, BasicType* sig_bt, AdapterHandlerEntry* cached_entry) {
2712 // we can only check for the same code if there is any
2713 #ifndef ZERO
2714 AdapterHandlerEntry* comparison_entry = create_adapter(total_args_passed, sig_bt, true);
2715 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
2716 assert(comparison_entry->compare_code(cached_entry), "code must match");
2717 // Release the one just created
2718 AdapterHandlerEntry::deallocate(comparison_entry);
2719 #endif // ZERO
2720 }
2721 #endif /* ASSERT */
2722
2723 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2724 assert(!method->is_abstract(), "abstract methods do not have adapters");
2725 // Use customized signature handler. Need to lock around updates to
2726 // the _adapter_handler_table (it is not safe for concurrent readers
2727 // and a single writer: this could be fixed if it becomes a
2728 // problem).
2729
2730 // Fast-path for trivial adapters
2731 AdapterHandlerEntry* entry = get_simple_adapter(method);
2732 if (entry != nullptr) {
2733 return entry;
2734 }
2735
2736 ResourceMark rm;
2737 bool new_entry = false;
2738
2739 // Fill in the signature array, for the calling-convention call.
2740 int total_args_passed = method->size_of_parameters(); // All args on stack
2741
2742 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2743 method->is_static(), total_args_passed);
2744 assert(si.slots() == total_args_passed, "");
2745 BasicType* sig_bt = si.basic_types();
2746 {
2747 MutexLocker mu(AdapterHandlerLibrary_lock);
2748
2749 // Lookup method signature's fingerprint
2750 entry = lookup(total_args_passed, sig_bt);
2751
2752 if (entry != nullptr) {
2753 #ifndef ZERO
2754 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
2755 #endif
2756 #ifdef ASSERT
2757 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
2758 verify_adapter_sharing(total_args_passed, sig_bt, entry);
2759 }
2760 #endif
2761 } else {
2762 entry = create_adapter(total_args_passed, sig_bt);
2763 if (entry != nullptr) {
2764 new_entry = true;
2765 }
2766 }
2767 }
2768
2769 // Outside of the lock
2770 if (new_entry) {
2771 post_adapter_creation(entry);
2772 }
2773 return entry;
2774 }
2775
2776 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
2777 ResourceMark rm;
2778 const char* name = AdapterHandlerLibrary::name(handler);
2779 const uint32_t id = AdapterHandlerLibrary::id(handler);
2780
2781 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
2782 if (blob != nullptr) {
2783 handler->set_adapter_blob(blob->as_adapter_blob());
2784 }
2785 }
2786
2787 #ifndef PRODUCT
2788 void AdapterHandlerLibrary::print_adapter_handler_info(outputStream* st, AdapterHandlerEntry* handler) {
2789 ttyLocker ttyl;
2790 ResourceMark rm;
2791 int insts_size;
2792 // on Zero the blob may be null
2793 handler->print_adapter_on(tty);
2794 AdapterBlob* adapter_blob = handler->adapter_blob();
2795 if (adapter_blob == nullptr) {
2796 return;
2797 }
2798 insts_size = adapter_blob->code_size();
2799 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
2800 handler->fingerprint()->as_basic_args_string(),
2801 handler->fingerprint()->as_string(), insts_size);
2802 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
2803 if (Verbose || PrintStubCode) {
2804 address first_pc = adapter_blob->content_begin();
2805 if (first_pc != nullptr) {
2806 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
2807 st->cr();
2808 }
2809 }
2810 }
2811 #endif // PRODUCT
2812
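// Convert the absolute entry addresses produced by the adapter generator into
// offsets relative to the i2c entry, presumably so they remain valid if the
// blob is relocated (e.g. when stored in or loaded from the AOT code cache).
// A missing c2i_no_clinit_check entry is recorded as -1.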
2813 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
2814 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
2815 entry_offset[AdapterBlob::I2C] = 0;
2816 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
2817 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
2818 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
2819 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
2820 } else {
2821 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
2822 }
2823 }
2824
2825 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
2826 int total_args_passed,
2827 BasicType* sig_bt,
2828 bool is_transient) {
2829 if (log_is_enabled(Info, perf, class, link)) {
2830 ClassLoader::perf_method_adapters_count()->inc();
2831 }
2832
2833 #ifndef ZERO
2834 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2835 CodeBuffer buffer(buf);
2836 short buffer_locs[20];
2837 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2838 sizeof(buffer_locs)/sizeof(relocInfo));
2839 MacroAssembler masm(&buffer);
2840 VMRegPair stack_regs[16];
2841 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2842
2843 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2844 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2845 address entry_address[AdapterBlob::ENTRY_COUNT];
2846 SharedRuntime::generate_i2c2i_adapters(&masm,
2847 total_args_passed,
2848 comp_args_on_stack,
2849 sig_bt,
2850 regs,
2851 entry_address);
2852   // On Zero there is no code to save and no need to create a blob
2853   // or relocate the handler.
2854 int entry_offset[AdapterBlob::ENTRY_COUNT];
2855 address_to_offset(entry_address, entry_offset);
2856 #ifdef ASSERT
2857 if (VerifyAdapterSharing) {
2858 handler->save_code(buf->code_begin(), buffer.insts_size());
2859 if (is_transient) {
2860 return true;
2861 }
2862 }
2863 #endif
2864 AdapterBlob* adapter_blob = AdapterBlob::create(&buffer, entry_offset);
2865 if (adapter_blob == nullptr) {
2866 // CodeCache is full, disable compilation
2867     // Ought to log this but the compile log is only per compile thread
2868     // and we're some nondescript Java thread.
2869 return false;
2870 }
2871 handler->set_adapter_blob(adapter_blob);
2872 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
2873 // try to save generated code
2874 const char* name = AdapterHandlerLibrary::name(handler);
2875 const uint32_t id = AdapterHandlerLibrary::id(handler);
2876 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
2877 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
2878 }
2879 #endif // ZERO
2880
2881 #ifndef PRODUCT
2882 // debugging support
2883 if (PrintAdapterHandlers || PrintStubCode) {
2884 print_adapter_handler_info(tty, handler);
2885 }
2886 #endif
2887
2888 return true;
2889 }
2890
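// Allocate a fingerprint and a handler entry for the given signature, generate the adapter
// code, and (for non-transient adapters) record the entry in the runtime adapter table.
// The caller must hold the AdapterHandlerLibrary_lock when creating a non-transient adapter.
// Returns nullptr if the adapter code could not be generated, e.g. because the CodeCache is full.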
2891 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(int total_args_passed,
2892 BasicType* sig_bt,
2893 bool is_transient) {
2894 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2895 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
2896 if (!generate_adapter_code(handler, total_args_passed, sig_bt, is_transient)) {
2897 AdapterHandlerEntry::deallocate(handler);
2898 return nullptr;
2899 }
2900 if (!is_transient) {
2901 assert_lock_strong(AdapterHandlerLibrary_lock);
2902 _adapter_handler_table->put(fp, handler);
2903 }
2904 return handler;
2905 }
2906
2907 #if INCLUDE_CDS
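// Clear runtime-only state (saved code, adapter blob, linked flag) before this entry is
// written into the AOT archive; the adapter blob and linked state are re-established by
// AdapterHandlerEntry::link() during the production run.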
2908 void AdapterHandlerEntry::remove_unshareable_info() {
2909 #ifdef ASSERT
2910 _saved_code = nullptr;
2911 _saved_code_length = 0;
2912 #endif // ASSERT
2913 _adapter_blob = nullptr;
2914 _linked = false;
2915 }
2916
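// Closure used by dump_aot_adapter_table(): for every adapter entry that has been archived,
// add its fingerprint hash and buffer offset to the CompactHashtableWriter; entries that
// were not archived are skipped.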
2917 class CopyAdapterTableToArchive : StackObj {
2918 private:
2919 CompactHashtableWriter* _writer;
2920 ArchiveBuilder* _builder;
2921 public:
2922 CopyAdapterTableToArchive(CompactHashtableWriter* writer) : _writer(writer),
2923 _builder(ArchiveBuilder::current())
2924 {}
2925
2926 bool do_entry(AdapterFingerPrint* fp, AdapterHandlerEntry* entry) {
2927 LogStreamHandle(Trace, aot) lsh;
2928 if (ArchiveBuilder::current()->has_been_archived((address)entry)) {
2929 assert(ArchiveBuilder::current()->has_been_archived((address)fp), "must be");
2930 AdapterFingerPrint* buffered_fp = ArchiveBuilder::current()->get_buffered_addr(fp);
2931 assert(buffered_fp != nullptr,"sanity check");
2932 AdapterHandlerEntry* buffered_entry = ArchiveBuilder::current()->get_buffered_addr(entry);
2933 assert(buffered_entry != nullptr,"sanity check");
2934
2935 uint hash = fp->compute_hash();
2936 u4 delta = _builder->buffer_to_offset_u4((address)buffered_entry);
2937 _writer->add(hash, delta);
2938 if (lsh.is_enabled()) {
2939 address fp_runtime_addr = (address)buffered_fp + ArchiveBuilder::current()->buffer_to_requested_delta();
2940 address entry_runtime_addr = (address)buffered_entry + ArchiveBuilder::current()->buffer_to_requested_delta();
2941 log_trace(aot)("Added fp=" INTPTR_FORMAT " (%s), entry=" INTPTR_FORMAT " to the archived adapter table", p2i(fp_runtime_addr), buffered_fp->as_basic_args_string(), p2i(entry_runtime_addr));
2942 }
2943 } else {
2944 if (lsh.is_enabled()) {
2945 log_trace(aot)("Skipping adapter handler %p (fp=%s) as it is not archived", entry, fp->as_basic_args_string());
2946 }
2947 }
2948 return true;
2949 }
2950 };
2951
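// Write the contents of the runtime adapter table into the compact hashtable
// (_aot_adapter_handler_table) that is stored in the AOT archive.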
2952 void AdapterHandlerLibrary::dump_aot_adapter_table() {
2953 CompactHashtableStats stats;
2954 CompactHashtableWriter writer(_adapter_handler_table->number_of_entries(), &stats);
2955 CopyAdapterTableToArchive copy(&writer);
2956 _adapter_handler_table->iterate(&copy);
2957 writer.dump(&_aot_adapter_handler_table, "archived adapter table");
2958 }
2959
2960 void AdapterHandlerLibrary::serialize_shared_table_header(SerializeClosure* soc) {
2961 _aot_adapter_handler_table.serialize_header(soc);
2962 }
2963
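// Try to bind an archived handler to its code in the AOT code cache. On success the
// handler's adapter blob is set; on failure the caller falls back to generating the
// adapter code at runtime (see AdapterHandlerEntry::link()).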
2964 void AdapterHandlerLibrary::link_aot_adapter_handler(AdapterHandlerEntry* handler) {
2965 #ifdef ASSERT
2966 if (TestAOTAdapterLinkFailure) {
2967 return;
2968 }
2969 #endif
2970 lookup_aot_cache(handler);
2971 #ifndef PRODUCT
2972 // debugging support
2973 if (PrintAdapterHandlers || PrintStubCode) {
2974 print_adapter_handler_info(tty, handler);
2975 }
2976 #endif
2977 }
2978
2979 // This method is used during a production run to link archived adapters (stored in the AOT cache)
2980 // to their code in the AOT code cache.
2981 void AdapterHandlerEntry::link() {
2982 ResourceMark rm;
2983 assert(_fingerprint != nullptr, "_fingerprint must not be null");
2984 bool generate_code = false;
2985 // Generate code only if AOTCodeCache is not available, or
2986 // caching adapters is disabled, or we fail to link
2987 // the AdapterHandlerEntry to its code in the AOTCodeCache
2988 if (AOTCodeCache::is_using_adapter()) {
2989 AdapterHandlerLibrary::link_aot_adapter_handler(this);
2990 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
2991 if (_adapter_blob == nullptr) {
2992 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
2993 generate_code = true;
2994 }
2995 } else {
2996 generate_code = true;
2997 }
2998 if (generate_code) {
2999 int nargs;
3000 BasicType* bt = _fingerprint->as_basic_type(nargs);
3001 if (!AdapterHandlerLibrary::generate_adapter_code(this, nargs, bt, /* is_transient */ false)) {
3002 // Don't throw exceptions during VM initialization because java.lang.* classes
3003 // might not have been initialized, causing problems when constructing the
3004 // Java exception object.
3005 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3006 }
3007 }
3008 if (_adapter_blob != nullptr) {
3009 post_adapter_creation(this);
3010 }
3011 assert(_linked, "AdapterHandlerEntry must now be linked");
3012 }
3013
3014 void AdapterHandlerLibrary::link_aot_adapters() {
3015 uint max_id = 0;
3016 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3017 /* It is possible that some adapters generated in assembly phase are not stored in the cache.
3018 * That implies adapter ids of the adapters in the cache may not be contiguous.
3019 * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
3020 * result in a collision of adapter ids between AOT-stored handlers and runtime-generated handlers.
3021 * To avoid such a situation, initialize the _id_counter with the largest adapter id among the AOT-stored handlers.
3022 */
3023 _aot_adapter_handler_table.iterate_all([&](AdapterHandlerEntry* entry) {
3024 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3025 entry->link();
3026 max_id = MAX2(max_id, entry->id());
3027 });
3028 // Set the adapter id counter to the maximum id found in the AOT cache
3029 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3030 _id_counter = max_id;
3031 }
3032
3033 // This method is called during a production run to look up the simple adapters
3034 // in the archived adapter handler table.
3035 void AdapterHandlerLibrary::lookup_simple_adapters() {
3036 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3037
3038 MutexLocker mu(AdapterHandlerLibrary_lock);
3039 _no_arg_handler = lookup(0, nullptr);
3040
3041 BasicType obj_args[] = { T_OBJECT };
3042 _obj_arg_handler = lookup(1, obj_args);
3043
3044 BasicType int_args[] = { T_INT };
3045 _int_arg_handler = lookup(1, int_args);
3046
3047 BasicType obj_int_args[] = { T_OBJECT, T_INT };
3048 _obj_int_arg_handler = lookup(2, obj_int_args);
3049
3050 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
3051 _obj_obj_arg_handler = lookup(2, obj_obj_args);
3052
3053 assert(_no_arg_handler != nullptr &&
3054 _obj_arg_handler != nullptr &&
3055 _int_arg_handler != nullptr &&
3056 _obj_int_arg_handler != nullptr &&
3057 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3058 assert(_no_arg_handler->is_linked() &&
3059 _obj_arg_handler->is_linked() &&
3060 _int_arg_handler->is_linked() &&
3061 _obj_int_arg_handler->is_linked() &&
3062 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3063 }
3064 #endif // INCLUDE_CDS
3065
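// Push the metaspace pointers embedded in this entry (currently only the fingerprint)
// to the given closure.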
3066 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3067 LogStreamHandle(Trace, aot) lsh;
3068 if (lsh.is_enabled()) {
3069 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3070 lsh.cr();
3071 }
3072 it->push(&_fingerprint);
3073 }
3074
3075 AdapterHandlerEntry::~AdapterHandlerEntry() {
3076 if (_fingerprint != nullptr) {
3077 AdapterFingerPrint::deallocate(_fingerprint);
3078 _fingerprint = nullptr;
3079 }
3080 #ifdef ASSERT
3081 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3082 #endif
3083 FreeHeap(this);
3084 }
3085
3086
3087 #ifdef ASSERT
3088 // Capture the code before relocation so that it can be compared
3089 // against other versions. If the code is captured after relocation
3090 // then relative instructions won't be equivalent.
3091 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3092 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3093 _saved_code_length = length;
3094 memcpy(_saved_code, buffer, length);
3095 }
3096
3097
3098 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3099 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3100
3101 if (other->_saved_code_length != _saved_code_length) {
3102 return false;
3103 }
3104
3105 return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
3106 }
3107 #endif
3108
3109
3110 /**
3111 * Create a native wrapper for this native method. The wrapper converts the
3112 * Java-compiled calling convention to the native convention, handles
3113 * arguments, and transitions to native. On return from the native call we
3114 * transition back to Java, blocking if a safepoint is in progress.
3115 */
3116 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
3117 ResourceMark rm;
3118 nmethod* nm = nullptr;
3119
3120 // Check if memory should be freed before allocation
3121 CodeCache::gc_on_allocation();
3122
3123 assert(method->is_native(), "must be native");
3124 assert(method->is_special_native_intrinsic() ||
3125 method->has_native_function(), "must have something valid to call!");
3126
3127 {
3128 // Perform the work while holding the lock, but perform any printing outside the lock
3129 MutexLocker mu(AdapterHandlerLibrary_lock);
3130 // See if somebody beat us to it
3131 if (method->code() != nullptr) {
3132 return;
3133 }
3134
3135 const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
3136 assert(compile_id > 0, "Must generate native wrapper");
3137
3138
3139 ResourceMark rm;
3140 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3141 if (buf != nullptr) {
3142 CodeBuffer buffer(buf);
3143
3144 if (method->is_continuation_enter_intrinsic()) {
3145 buffer.initialize_stubs_size(192);
3146 }
3147
3148 struct { double data[20]; } locs_buf;
3149 struct { double data[20]; } stubs_locs_buf;
3150 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3151 #if defined(AARCH64) || defined(PPC64)
3152 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3153 // in the constant pool to ensure ordering between the barrier and oops
3154 // accesses. For native_wrappers we need a constant.
3155 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3156 // static java call that is resolved in the runtime.
3157 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3158 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3159 }
3160 #endif
3161 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3162 MacroAssembler _masm(&buffer);
3163
3164 // Fill in the signature array, for the calling-convention call.
3165 const int total_args_passed = method->size_of_parameters();
3166
3167 VMRegPair stack_regs[16];
3168 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3169
3170 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3171 method->is_static(), total_args_passed);
3172 BasicType* sig_bt = si.basic_types();
3173 assert(si.slots() == total_args_passed, "");
3174 BasicType ret_type = si.return_type();
3175
3176 // Now get the compiled-Java arguments layout.
3177 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3178
3179 // Generate the compiled-to-native wrapper code
3180 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3181
3182 if (nm != nullptr) {
3183 {
3184 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3185 if (nm->make_in_use()) {
3186 method->set_code(method, nm);
3187 }
3188 }
3189
3190 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3191 if (directive->PrintAssemblyOption) {
3192 nm->print_code();
3193 }
3194 DirectivesStack::release(directive);
3195 }
3196 }
3197 } // Unlock AdapterHandlerLibrary_lock
3198
3199
3200 // Install the generated code.
3201 if (nm != nullptr) {
3202 const char *msg = method->is_static() ? "(static)" : "";
3203 CompileTask::print_ul(nm, msg);
3204 if (PrintCompilation) {
3205 ttyLocker ttyl;
3206 CompileTask::print(tty, nm, msg);
3207 }
3208 nm->post_compiled_method_load_event();
3209 }
3210 }
3211
3212 // -------------------------------------------------------------------------
3213 // Java-Java calling convention
3214 // (what you use when Java calls Java)
3215
3216 //------------------------------name_for_receiver----------------------------------
3217 // For a given signature, return the VMReg for parameter 0.
3218 VMReg SharedRuntime::name_for_receiver() {
3219 VMRegPair regs;
3220 BasicType sig_bt = T_OBJECT;
3221 (void) java_calling_convention(&sig_bt, &regs, 1);
3222 // Return argument 0 register. In the LP64 build pointers
3223 // take 2 registers, but the VM wants only the 'main' name.
3224 return regs.first();
3225 }
3226
3227 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
3228 // This method returns a data structure allocated as a
3229 // ResourceObject, so do not put any ResourceMarks in here.
3230
3231 BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
3232 VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
3233 int cnt = 0;
3234 if (has_receiver) {
3235 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
3236 }
3237
3238 for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) {
3239 BasicType type = ss.type();
3240 sig_bt[cnt++] = type;
3241 if (is_double_word_type(type))
3242 sig_bt[cnt++] = T_VOID;
3243 }
3244
3245 if (has_appendix) {
3246 sig_bt[cnt++] = T_OBJECT;
3247 }
3248
3249 assert(cnt < 256, "grow table size");
3250
3251 int comp_args_on_stack;
3252 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt);
3253
3254 // The calling convention doesn't count out_preserve_stack_slots, so
3255 // we must add that in to get "true" stack offsets.
3256
3257 if (comp_args_on_stack) {
3258 for (int i = 0; i < cnt; i++) {
3259 VMReg reg1 = regs[i].first();
3260 if (reg1->is_stack()) {
3261 // Yuck
3262 reg1 = reg1->bias(out_preserve_stack_slots());
3263 }
3264 VMReg reg2 = regs[i].second();
3265 if (reg2->is_stack()) {
3266 // Yuck
3267 reg2 = reg2->bias(out_preserve_stack_slots());
3268 }
3269 regs[i].set_pair(reg2, reg1);
3270 }
3271 }
3272
3273 // results
3274 *arg_size = cnt;
3275 return regs;
3276 }
3277
3278 // OSR Migration Code
3279 //
3280 // This code is used to convert interpreter frames into compiled frames. It is
3281 // called from the very start of a compiled OSR nmethod. A temp array is
3282 // allocated to hold the interesting bits of the interpreter frame. All
3283 // active locks are inflated to allow them to move. The displaced headers and
3284 // active interpreter locals are copied into the temp buffer. Then we return
3285 // back to the compiled code. The compiled code then pops the current
3286 // interpreter frame off the stack and pushes a new compiled frame. Then it
3287 // copies the interpreter locals and displaced headers where it wants.
3288 // Finally it calls back to free the temp buffer.
3289 //
3290 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
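// The temp buffer layout is: max_locals words of interpreter locals, followed by a
// (lock word, object) pair for each active monitor.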
3291
3292 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
3293 assert(current == JavaThread::current(), "pre-condition");
3294 JFR_ONLY(Jfr::check_and_process_sample_request(current);)
3295 // During OSR migration, we unwind the interpreted frame and replace it with a compiled
3296 // frame. The stack watermark code below ensures that the interpreted frame is processed
3297 // before it gets unwound. This is helpful as the size of the compiled frame could be
3298 // larger than the interpreted frame, which could result in the new frame not being
3299 // processed correctly.
3300 StackWatermarkSet::before_unwind(current);
3301
3302 //
3303 // This code is dependent on the memory layout of the interpreter local
3304 // array and the monitors. On all of our platforms the layout is identical
3305 // so this code is shared. If some platform lays its arrays out
3306 // differently, then this code could move to platform-specific code or
3307 // the code here could be modified to copy items one at a time using
3308 // frame accessor methods and be platform independent.
3309
3310 frame fr = current->last_frame();
3311 assert(fr.is_interpreted_frame(), "");
3312 assert(fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks");
3313
3314 // Figure out how many monitors are active.
3315 int active_monitor_count = 0;
3316 for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
3317 kptr < fr.interpreter_frame_monitor_begin();
3318 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
3319 if (kptr->obj() != nullptr) active_monitor_count++;
3320 }
3321
3322 // QQQ we could place number of active monitors in the array so that compiled code
3323 // could double check it.
3324
3325 Method* moop = fr.interpreter_frame_method();
3326 int max_locals = moop->max_locals();
3327 // Allocate temp buffer, 1 word per local & 2 per active monitor
3328 int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size();
3329 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t, buf_size_words, mtCode);
3330
3331 // Copy the locals. Order is preserved so that loading of longs works.
3332 // Since there's no GC I can copy the oops blindly.
3333 assert(sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
3334 Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
3335 (HeapWord*)&buf[0],
3336 max_locals);
3337
3338 // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
3339 int i = max_locals;
3340 for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3341 kptr2 < fr.interpreter_frame_monitor_begin();
3342 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3343 if (kptr2->obj() != nullptr) { // Avoid 'holes' in the monitor array
3344 BasicLock *lock = kptr2->lock();
3345 if (UseObjectMonitorTable) {
3346 buf[i] = (intptr_t)lock->object_monitor_cache();
3347 }
3348 #ifdef ASSERT
3349 else {
3350 buf[i] = badDispHeaderOSR;
3351 }
3352 #endif
3353 i++;
3354 buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3355 }
3356 }
3357 assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3358
3359 RegisterMap map(current,
3360 RegisterMap::UpdateMap::skip,
3361 RegisterMap::ProcessFrames::include,
3362 RegisterMap::WalkContinuation::skip);
3363 frame sender = fr.sender(&map);
3364 if (sender.is_interpreted_frame()) {
3365 current->push_cont_fastpath(sender.sp());
3366 }
3367
3368 return buf;
3369 JRT_END
3370
3371 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3372 FREE_C_HEAP_ARRAY(intptr_t, buf);
3373 JRT_END
3374
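// Helpers used when storing adapter blobs in the AOT code cache: the fingerprint's
// basic-args string provides the name and the handler id the identifier passed to
// AOTCodeCache::store_code_blob().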
3375 const char* AdapterHandlerLibrary::name(AdapterHandlerEntry* handler) {
3376 return handler->fingerprint()->as_basic_args_string();
3377 }
3378
3379 uint32_t AdapterHandlerLibrary::id(AdapterHandlerEntry* handler) {
3380 return handler->id();
3381 }
3382
3383 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3384 bool found = false;
3385 #if INCLUDE_CDS
3386 if (AOTCodeCache::is_using_adapter()) {
3387 auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3388 if (b == handler->adapter_blob()) {
3389 found = true;
3390 st->print("Adapter for signature: ");
3391 handler->print_adapter_on(st);
3392 return false; // abort iteration
3393 } else {
3394 return true; // keep looking
3395 }
3396 };
3397 _aot_adapter_handler_table.iterate(findblob_archived_table);
3398 }
3399 #endif // INCLUDE_CDS
3400 if (!found) {
3401 auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* handler) {
3402 if (b == handler->adapter_blob()) {
3403 found = true;
3404 st->print("Adapter for signature: ");
3405 handler->print_adapter_on(st);
3406 return false; // abort iteration
3407 } else {
3408 return true; // keep looking
3409 }
3410 };
3411 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3412 _adapter_handler_table->iterate(findblob_runtime_table);
3413 }
3414 assert(found, "Should have found handler");
3415 }
3416
3417 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3418 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3419 if (adapter_blob() != nullptr) {
3420 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3421 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3422 st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3423 if (get_c2i_no_clinit_check_entry() != nullptr) {
3424 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3425 }
3426 }
3427 st->cr();
3428 }
3429
3430 #ifndef PRODUCT
3431
3432 void AdapterHandlerLibrary::print_statistics() {
3433 print_table_statistics();
3434 }
3435
3436 #endif /* PRODUCT */
3437
3438 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3439 assert(current == JavaThread::current(), "pre-condition");
3440 StackOverflow* overflow_state = current->stack_overflow_state();
3441 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3442 overflow_state->set_reserved_stack_activation(current->stack_base());
3443 JRT_END
3444
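// Walk the stack starting at fr and look for frames executing a method that has reserved
// stack access (a ReservedStackAccess annotated method). A warning and a JFR event are
// emitted for each such frame; the activation of the last such frame encountered while
// walking towards the first frame is returned, or an empty frame if none is found.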
3445 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
3446 ResourceMark rm(current);
3447 frame activation;
3448 nmethod* nm = nullptr;
3449 int count = 1;
3450
3451 assert(fr.is_java_frame(), "Must start on Java frame");
3452
3453 RegisterMap map(JavaThread::current(),
3454 RegisterMap::UpdateMap::skip,
3455 RegisterMap::ProcessFrames::skip,
3456 RegisterMap::WalkContinuation::skip); // don't walk continuations
3457 for (; !fr.is_first_frame(); fr = fr.sender(&map)) {
3458 if (!fr.is_java_frame()) {
3459 continue;
3460 }
3461
3462 Method* method = nullptr;
3463 bool found = false;
3464 if (fr.is_interpreted_frame()) {
3465 method = fr.interpreter_frame_method();
3466 if (method != nullptr && method->has_reserved_stack_access()) {
3467 found = true;
3468 }
3469 } else {
3470 CodeBlob* cb = fr.cb();
3471 if (cb != nullptr && cb->is_nmethod()) {
3472 nm = cb->as_nmethod();
3473 method = nm->method();
3474 for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != nullptr; sd = sd->sender()) {
3475 method = sd->method();
3476 if (method != nullptr && method->has_reserved_stack_access()) {
3477 found = true;
3478 }
3479 }
3480 }
3481 }
3482 if (found) {
3483 activation = fr;
3484 warning("Potentially dangerous stack overflow in "
3485 "ReservedStackAccess annotated method %s [%d]",
3486 method->name_and_sig_as_C_string(), count++);
3487 EventReservedStackActivation event;
3488 if (event.should_commit()) {
3489 event.set_method(method);
3490 event.commit();
3491 }
3492 }
3493 }
3494 return activation;
3495 }
3496
3497 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3498 // After any safepoint, just before going back to compiled code,
3499 // we inform the GC that we will be doing initializing writes to
3500 // this object in the future without emitting card-marks, so
3501 // the GC may take any compensating steps.
3502
3503 oop new_obj = current->vm_result_oop();
3504 if (new_obj == nullptr) return;
3505
3506 BarrierSet *bs = BarrierSet::barrier_set();
3507 bs->on_slowpath_allocation_exit(current, new_obj);
3508 }