1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/archiveBuilder.hpp"
26 #include "cds/archiveUtils.inline.hpp"
27 #include "classfile/classLoader.hpp"
28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/stringTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/aotCodeCache.hpp"
33 #include "code/codeCache.hpp"
34 #include "code/compiledIC.hpp"
35 #include "code/nmethod.inline.hpp"
36 #include "code/scopeDesc.hpp"
37 #include "code/vtableStubs.hpp"
38 #include "compiler/abstractCompiler.hpp"
39 #include "compiler/compileBroker.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/interpreterRuntime.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "jvm.h"
47 #include "logging/log.hpp"
48 #include "memory/oopFactory.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "metaprogramming/primitiveConversions.hpp"
52 #include "oops/access.hpp"
53 #include "oops/fieldStreams.inline.hpp"
54 #include "oops/inlineKlass.inline.hpp"
55 #include "oops/klass.hpp"
56 #include "oops/method.inline.hpp"
57 #include "oops/objArrayKlass.hpp"
58 #include "oops/objArrayOop.inline.hpp"
59 #include "oops/oop.inline.hpp"
60 #include "prims/forte.hpp"
61 #include "prims/jvmtiExport.hpp"
62 #include "prims/jvmtiThreadState.hpp"
63 #include "prims/methodHandles.hpp"
64 #include "prims/nativeLookup.hpp"
65 #include "runtime/arguments.hpp"
66 #include "runtime/atomicAccess.hpp"
67 #include "runtime/basicLock.inline.hpp"
68 #include "runtime/frame.inline.hpp"
69 #include "runtime/handles.inline.hpp"
70 #include "runtime/init.hpp"
71 #include "runtime/interfaceSupport.inline.hpp"
72 #include "runtime/java.hpp"
73 #include "runtime/javaCalls.hpp"
74 #include "runtime/jniHandles.inline.hpp"
75 #include "runtime/osThread.hpp"
76 #include "runtime/perfData.hpp"
77 #include "runtime/sharedRuntime.hpp"
78 #include "runtime/signature.hpp"
79 #include "runtime/stackWatermarkSet.hpp"
80 #include "runtime/stubRoutines.hpp"
81 #include "runtime/synchronizer.inline.hpp"
82 #include "runtime/timerTrace.hpp"
83 #include "runtime/vframe.inline.hpp"
84 #include "runtime/vframeArray.hpp"
85 #include "runtime/vm_version.hpp"
86 #include "utilities/copy.hpp"
87 #include "utilities/dtrace.hpp"
88 #include "utilities/events.hpp"
89 #include "utilities/globalDefinitions.hpp"
90 #include "utilities/hashTable.hpp"
91 #include "utilities/macros.hpp"
92 #include "utilities/xmlstream.hpp"
93 #ifdef COMPILER1
94 #include "c1/c1_Runtime1.hpp"
95 #endif
96 #ifdef COMPILER2
97 #include "opto/runtime.hpp"
98 #endif
99 #if INCLUDE_JFR
100 #include "jfr/jfr.inline.hpp"
101 #endif
102
103 // Shared runtime stub routines reside in their own unique blob with a
104 // single entry point
105
106
107 #define SHARED_STUB_FIELD_DEFINE(name, type) \
108 type* SharedRuntime::BLOB_FIELD_NAME(name);
109 SHARED_STUBS_DO(SHARED_STUB_FIELD_DEFINE)
110 #undef SHARED_STUB_FIELD_DEFINE
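// Each SHARED_STUBS_DO entry defines one static blob field here (roughly of
// the form "type* SharedRuntime::_<name>_blob;"), e.g. the _wrong_method_blob
// and _throw_StackOverflowError_blob fields initialized by the
// generate_*_stubs() functions below.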
111
112 nmethod* SharedRuntime::_cont_doYield_stub;
113
114 #if 0
115 // TODO tweak global stub name generation to match this
116 #define SHARED_STUB_NAME_DECLARE(name, type) "Shared Runtime " # name "_blob",
117 const char *SharedRuntime::_stub_names[] = {
118 SHARED_STUBS_DO(SHARED_STUB_NAME_DECLARE)
119 };
120 #endif
121
122 //----------------------------generate_stubs-----------------------------------
123 void SharedRuntime::generate_initial_stubs() {
124 // Build this early so it's available for the interpreter.
125 _throw_StackOverflowError_blob =
126 generate_throw_exception(StubId::shared_throw_StackOverflowError_id,
127 CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
128 }
129
130 void SharedRuntime::generate_stubs() {
131 _wrong_method_blob =
132 generate_resolve_blob(StubId::shared_wrong_method_id,
133 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method));
134 _wrong_method_abstract_blob =
135 generate_resolve_blob(StubId::shared_wrong_method_abstract_id,
136 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract));
137 _ic_miss_blob =
138 generate_resolve_blob(StubId::shared_ic_miss_id,
139 CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss));
140 _resolve_opt_virtual_call_blob =
141 generate_resolve_blob(StubId::shared_resolve_opt_virtual_call_id,
142 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C));
143 _resolve_virtual_call_blob =
144 generate_resolve_blob(StubId::shared_resolve_virtual_call_id,
145 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C));
146 _resolve_static_call_blob =
147 generate_resolve_blob(StubId::shared_resolve_static_call_id,
148 CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C));
149
150 _throw_delayed_StackOverflowError_blob =
151 generate_throw_exception(StubId::shared_throw_delayed_StackOverflowError_id,
152 CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
153
154 _throw_AbstractMethodError_blob =
155 generate_throw_exception(StubId::shared_throw_AbstractMethodError_id,
156 CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
157
158 _throw_IncompatibleClassChangeError_blob =
159 generate_throw_exception(StubId::shared_throw_IncompatibleClassChangeError_id,
160 CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
161
162 _throw_NullPointerException_at_call_blob =
163 generate_throw_exception(StubId::shared_throw_NullPointerException_at_call_id,
164 CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
165
166 #if COMPILER2_OR_JVMCI
167 // Vectors are generated only by C2 and JVMCI.
168 bool support_wide = is_wide_vector(MaxVectorSize);
169 if (support_wide) {
170 _polling_page_vectors_safepoint_handler_blob =
171 generate_handler_blob(StubId::shared_polling_page_vectors_safepoint_handler_id,
172 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
173 }
174 #endif // COMPILER2_OR_JVMCI
175 _polling_page_safepoint_handler_blob =
176 generate_handler_blob(StubId::shared_polling_page_safepoint_handler_id,
177 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
178 _polling_page_return_handler_blob =
179 generate_handler_blob(StubId::shared_polling_page_return_handler_id,
180 CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
181
182 generate_deopt_blob();
183 }
184
185 void SharedRuntime::init_adapter_library() {
186 AdapterHandlerLibrary::initialize();
187 }
188
189 #if INCLUDE_JFR
190 //------------------------------generate jfr runtime stubs ------
191 void SharedRuntime::generate_jfr_stubs() {
192 ResourceMark rm;
193 const char* timer_msg = "SharedRuntime generate_jfr_stubs";
194 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
195
196 _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint();
197 _jfr_return_lease_blob = generate_jfr_return_lease();
198 }
199
200 #endif // INCLUDE_JFR
201
202 #include <math.h>
203
204 // Implementation of SharedRuntime
205
206 #ifndef PRODUCT
207 // For statistics
208 uint SharedRuntime::_ic_miss_ctr = 0;
209 uint SharedRuntime::_wrong_method_ctr = 0;
210 uint SharedRuntime::_resolve_static_ctr = 0;
211 uint SharedRuntime::_resolve_virtual_ctr = 0;
212 uint SharedRuntime::_resolve_opt_virtual_ctr = 0;
213 uint SharedRuntime::_implicit_null_throws = 0;
214 uint SharedRuntime::_implicit_div0_throws = 0;
215
216 int64_t SharedRuntime::_nof_normal_calls = 0;
217 int64_t SharedRuntime::_nof_inlined_calls = 0;
218 int64_t SharedRuntime::_nof_megamorphic_calls = 0;
219 int64_t SharedRuntime::_nof_static_calls = 0;
220 int64_t SharedRuntime::_nof_inlined_static_calls = 0;
221 int64_t SharedRuntime::_nof_interface_calls = 0;
222 int64_t SharedRuntime::_nof_inlined_interface_calls = 0;
223
224 uint SharedRuntime::_new_instance_ctr=0;
225 uint SharedRuntime::_new_array_ctr=0;
226 uint SharedRuntime::_multi2_ctr=0;
227 uint SharedRuntime::_multi3_ctr=0;
228 uint SharedRuntime::_multi4_ctr=0;
229 uint SharedRuntime::_multi5_ctr=0;
230 uint SharedRuntime::_mon_enter_stub_ctr=0;
231 uint SharedRuntime::_mon_exit_stub_ctr=0;
232 uint SharedRuntime::_mon_enter_ctr=0;
233 uint SharedRuntime::_mon_exit_ctr=0;
234 uint SharedRuntime::_partial_subtype_ctr=0;
235 uint SharedRuntime::_jbyte_array_copy_ctr=0;
236 uint SharedRuntime::_jshort_array_copy_ctr=0;
237 uint SharedRuntime::_jint_array_copy_ctr=0;
238 uint SharedRuntime::_jlong_array_copy_ctr=0;
239 uint SharedRuntime::_oop_array_copy_ctr=0;
240 uint SharedRuntime::_checkcast_array_copy_ctr=0;
241 uint SharedRuntime::_unsafe_array_copy_ctr=0;
242 uint SharedRuntime::_generic_array_copy_ctr=0;
243 uint SharedRuntime::_slow_array_copy_ctr=0;
244 uint SharedRuntime::_find_handler_ctr=0;
245 uint SharedRuntime::_rethrow_ctr=0;
246 uint SharedRuntime::_unsafe_set_memory_ctr=0;
247
248 int SharedRuntime::_ICmiss_index = 0;
249 int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
250 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
251
252
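// Records an IC miss observed at call site 'at'; once the sampling table is
// full, further misses accumulate in the last slot.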
253 void SharedRuntime::trace_ic_miss(address at) {
254 for (int i = 0; i < _ICmiss_index; i++) {
255 if (_ICmiss_at[i] == at) {
256 _ICmiss_count[i]++;
257 return;
258 }
259 }
260 int index = _ICmiss_index++;
261 if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
262 _ICmiss_at[index] = at;
263 _ICmiss_count[index] = 1;
264 }
265
266 void SharedRuntime::print_ic_miss_histogram() {
267 if (ICMissHistogram) {
268 tty->print_cr("IC Miss Histogram:");
269 int tot_misses = 0;
270 for (int i = 0; i < _ICmiss_index; i++) {
271 tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
272 tot_misses += _ICmiss_count[i];
273 }
274 tty->print_cr("Total IC misses: %7d", tot_misses);
275 }
276 }
277
278 #ifdef COMPILER2
279 // Runtime methods for printf-style debug nodes (same printing format as fieldDescriptor::print_on_for)
280 void SharedRuntime::debug_print_value(jboolean x) {
281 tty->print_cr("boolean %d", x);
282 }
283
284 void SharedRuntime::debug_print_value(jbyte x) {
285 tty->print_cr("byte %d", x);
286 }
287
288 void SharedRuntime::debug_print_value(jshort x) {
289 tty->print_cr("short %d", x);
290 }
291
292 void SharedRuntime::debug_print_value(jchar x) {
293 tty->print_cr("char %c %d", isprint(x) ? x : ' ', x);
294 }
295
296 void SharedRuntime::debug_print_value(jint x) {
297 tty->print_cr("int %d", x);
298 }
299
300 void SharedRuntime::debug_print_value(jlong x) {
301 tty->print_cr("long " JLONG_FORMAT, x);
302 }
303
304 void SharedRuntime::debug_print_value(jfloat x) {
305 tty->print_cr("float %f", x);
306 }
307
308 void SharedRuntime::debug_print_value(jdouble x) {
309 tty->print_cr("double %lf", x);
310 }
311
312 void SharedRuntime::debug_print_value(oopDesc* x) {
313 x->print();
314 }
315 #endif // COMPILER2
316
317 #endif // PRODUCT
318
319
320 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
321 return x * y;
322 JRT_END
323
324
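// ldiv/lrem special-case min_jlong / -1: Java defines the quotient as
// min_jlong (the overflow wraps) and the remainder as 0, and the hardware
// divide would otherwise trap on some platforms.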
325 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
326 if (x == min_jlong && y == CONST64(-1)) {
327 return x;
328 } else {
329 return x / y;
330 }
331 JRT_END
332
333
334 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
335 if (x == min_jlong && y == CONST64(-1)) {
336 return 0;
337 } else {
338 return x % y;
339 }
340 JRT_END
341
342
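// Bit masks used by frem/drem below to detect infinity operands, working
// around fmod() returning wrong results for them on 64-bit Windows.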
343 #ifdef _WIN64
344 const juint float_sign_mask = 0x7FFFFFFF;
345 const juint float_infinity = 0x7F800000;
346 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
347 const julong double_infinity = CONST64(0x7FF0000000000000);
348 #endif
349
350 #if !defined(X86)
351 JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
352 #ifdef _WIN64
353 // 64-bit Windows on amd64 returns the wrong values for
354 // infinity operands.
355 juint xbits = PrimitiveConversions::cast<juint>(x);
356 juint ybits = PrimitiveConversions::cast<juint>(y);
357 // x Mod Infinity == x unless x is infinity
358 if (((xbits & float_sign_mask) != float_infinity) &&
359 ((ybits & float_sign_mask) == float_infinity) ) {
360 return x;
361 }
362 return ((jfloat)fmod_winx64((double)x, (double)y));
363 #else
364 return ((jfloat)fmod((double)x,(double)y));
365 #endif
366 JRT_END
367
368 JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
369 #ifdef _WIN64
370 julong xbits = PrimitiveConversions::cast<julong>(x);
371 julong ybits = PrimitiveConversions::cast<julong>(y);
372 // x Mod Infinity == x unless x is infinity
373 if (((xbits & double_sign_mask) != double_infinity) &&
374 ((ybits & double_sign_mask) == double_infinity) ) {
375 return x;
376 }
377 return ((jdouble)fmod_winx64((double)x, (double)y));
378 #else
379 return ((jdouble)fmod((double)x,(double)y));
380 #endif
381 JRT_END
382 #endif // !X86
383
384 JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
385 return (jfloat)x;
386 JRT_END
387
388 #ifdef __SOFTFP__
389 JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
390 return x + y;
391 JRT_END
392
393 JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
394 return x - y;
395 JRT_END
396
397 JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
398 return x * y;
399 JRT_END
400
401 JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
402 return x / y;
403 JRT_END
404
405 JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
406 return x + y;
407 JRT_END
408
409 JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
410 return x - y;
411 JRT_END
412
413 JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
414 return x * y;
415 JRT_END
416
417 JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
418 return x / y;
419 JRT_END
420
421 JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
422 return (jdouble)x;
423 JRT_END
424
425 JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
426 return (jdouble)x;
427 JRT_END
428
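// Float/double comparison helpers: the 'l' variants return -1 and the 'g'
// variants return 1 when either operand is NaN, matching the fcmpl/fcmpg and
// dcmpl/dcmpg bytecode semantics.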
429 JRT_LEAF(int, SharedRuntime::fcmpl(float x, float y))
430 return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan*/
431 JRT_END
432
433 JRT_LEAF(int, SharedRuntime::fcmpg(float x, float y))
434 return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
435 JRT_END
436
437 JRT_LEAF(int, SharedRuntime::dcmpl(double x, double y))
438 return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
439 JRT_END
440
441 JRT_LEAF(int, SharedRuntime::dcmpg(double x, double y))
442 return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
443 JRT_END
444
// Functions that return the opposite result of the corresponding __aeabi_*
// comparison functions when an operand is NaN (i.e. they return 1 for
// unordered operands).
446 JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
447 return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
448 JRT_END
449
450 JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
451 return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
452 JRT_END
453
454 JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
455 return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
456 JRT_END
457
458 JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
459 return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
460 JRT_END
461
462 JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
463 return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
464 JRT_END
465
466 JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
467 return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
468 JRT_END
469
470 JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
471 return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
472 JRT_END
473
474 JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
475 return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
476 JRT_END
477
478 // Intrinsics make gcc generate code for these.
479 float SharedRuntime::fneg(float f) {
480 return -f;
481 }
482
483 double SharedRuntime::dneg(double f) {
484 return -f;
485 }
486
487 #endif // __SOFTFP__
488
489 #if defined(__SOFTFP__) || defined(E500V2)
490 // Intrinsics make gcc generate code for these.
491 double SharedRuntime::dabs(double f) {
492 return (f <= (double)0.0) ? (double)0.0 - f : f;
493 }
494
495 #endif
496
497 #if defined(__SOFTFP__)
498 double SharedRuntime::dsqrt(double f) {
499 return sqrt(f);
500 }
501 #endif
502
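// Java-semantics floating-point to integral conversions (JLS 5.1.3): NaN maps
// to 0 and out-of-range values saturate to the min/max of the target type.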
503 JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
504 if (g_isnan(x))
505 return 0;
506 if (x >= (jfloat) max_jint)
507 return max_jint;
508 if (x <= (jfloat) min_jint)
509 return min_jint;
510 return (jint) x;
511 JRT_END
512
513
514 JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
515 if (g_isnan(x))
516 return 0;
517 if (x >= (jfloat) max_jlong)
518 return max_jlong;
519 if (x <= (jfloat) min_jlong)
520 return min_jlong;
521 return (jlong) x;
522 JRT_END
523
524
525 JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
526 if (g_isnan(x))
527 return 0;
528 if (x >= (jdouble) max_jint)
529 return max_jint;
530 if (x <= (jdouble) min_jint)
531 return min_jint;
532 return (jint) x;
533 JRT_END
534
535
536 JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
537 if (g_isnan(x))
538 return 0;
539 if (x >= (jdouble) max_jlong)
540 return max_jlong;
541 if (x <= (jdouble) min_jlong)
542 return min_jlong;
543 return (jlong) x;
544 JRT_END
545
546
547 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
548 return (jfloat)x;
549 JRT_END
550
551
552 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
553 return (jfloat)x;
554 JRT_END
555
556
557 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
558 return (jdouble)x;
559 JRT_END
560
561
562 // Exception handling across interpreter/compiler boundaries
563 //
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler for the
// previous frame, selected based on the return address.
567
568 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
  // Note: This is called after we have unwound the frame of the callee that
  // threw the exception. At this point, no check has been performed by the
  // StackWatermarkSet; notably, the stack is not yet walkable, so the check
  // must be deferred until later. Each of the handlers returned by this
  // function will eventually be dispatched to and will perform the deferred
  // StackWatermarkSet::after_unwind check once the stack is walkable.
575 assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
576 assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
577
578 #if INCLUDE_JVMCI
579 // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
580 // and other exception handler continuations do not read it
581 current->set_exception_pc(nullptr);
582 #endif // INCLUDE_JVMCI
583
584 if (Continuation::is_return_barrier_entry(return_address)) {
585 return StubRoutines::cont_returnBarrierExc();
586 }
587
588 // The fastest case first
589 CodeBlob* blob = CodeCache::find_blob(return_address);
590 nmethod* nm = (blob != nullptr) ? blob->as_nmethod_or_null() : nullptr;
591 if (nm != nullptr) {
592 // native nmethods don't have exception handlers
593 assert(!nm->is_native_method() || nm->method()->is_continuation_enter_intrinsic(), "no exception handler");
594 assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
595 if (nm->is_deopt_pc(return_address)) {
      // If we come here because of a stack overflow, the stack may be
      // unguarded. Reguard the stack; otherwise, if we return to the
      // deopt blob and the stack bang causes a stack overflow, we crash.
600 StackOverflow* overflow_state = current->stack_overflow_state();
601 bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
602 if (overflow_state->reserved_stack_activation() != current->stack_base()) {
603 overflow_state->set_reserved_stack_activation(current->stack_base());
604 }
605 assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
606 // The deferred StackWatermarkSet::after_unwind check will be performed in
607 // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
608 return SharedRuntime::deopt_blob()->unpack_with_exception();
609 } else {
610 // The deferred StackWatermarkSet::after_unwind check will be performed in
611 // * OptoRuntime::handle_exception_C_helper for C2 code
612 // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
613 #ifdef COMPILER2
614 if (nm->compiler_type() == compiler_c2) {
615 return OptoRuntime::exception_blob()->entry_point();
616 }
617 #endif // COMPILER2
618 return nm->exception_begin();
619 }
620 }
621
622 // Entry code
623 if (StubRoutines::returns_to_call_stub(return_address)) {
624 // The deferred StackWatermarkSet::after_unwind check will be performed in
625 // JavaCallWrapper::~JavaCallWrapper
    assert(StubRoutines::catch_exception_entry() != nullptr, "must be generated before");
627 return StubRoutines::catch_exception_entry();
628 }
629 if (blob != nullptr && blob->is_upcall_stub()) {
630 return StubRoutines::upcall_stub_exception_handler();
631 }
632 // Interpreted code
633 if (Interpreter::contains(return_address)) {
634 // The deferred StackWatermarkSet::after_unwind check will be performed in
635 // InterpreterRuntime::exception_handler_for_exception
636 return Interpreter::rethrow_exception_entry();
637 }
638
639 guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
640 guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");
641
642 #ifndef PRODUCT
643 { ResourceMark rm;
644 tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
645 os::print_location(tty, (intptr_t)return_address);
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
647 tty->print_cr("b) other problem");
648 }
649 #endif // PRODUCT
650 ShouldNotReachHere();
651 return nullptr;
652 }
653
654
655 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* current, address return_address))
656 return raw_exception_handler_for_return_address(current, return_address);
657 JRT_END
658
659
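// Returns the safepoint handler stub matching the kind of polling instruction
// at 'pc': return polls, wide-vector polls (C2/JVMCI only), and ordinary
// safepoint polls each dispatch to their own blob.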
660 address SharedRuntime::get_poll_stub(address pc) {
661 address stub;
662 // Look up the code blob
663 CodeBlob *cb = CodeCache::find_blob(pc);
664
665 // Should be an nmethod
666 guarantee(cb != nullptr && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");
667
668 // Look up the relocation information
669 assert(cb->as_nmethod()->is_at_poll_or_poll_return(pc),
670 "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));
671
672 #ifdef ASSERT
673 if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
674 tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
675 Disassembler::decode(cb);
676 fatal("Only polling locations are used for safepoint");
677 }
678 #endif
679
680 bool at_poll_return = cb->as_nmethod()->is_at_poll_return(pc);
681 bool has_wide_vectors = cb->as_nmethod()->has_wide_vectors();
682 if (at_poll_return) {
683 assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
684 "polling page return stub not created yet");
685 stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
686 } else if (has_wide_vectors) {
687 assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr,
688 "polling page vectors safepoint stub not created yet");
689 stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
690 } else {
691 assert(SharedRuntime::polling_page_safepoint_handler_blob() != nullptr,
692 "polling page safepoint stub not created yet");
693 stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
694 }
695 log_trace(safepoint)("Polling page exception: thread = " INTPTR_FORMAT " [%d], pc = "
696 INTPTR_FORMAT " (%s), stub = " INTPTR_FORMAT,
697 p2i(Thread::current()),
698 Thread::current()->osthread()->thread_id(),
699 p2i(pc),
700 at_poll_return ? "return" : "loop",
701 p2i(stub));
702 return stub;
703 }
704
705 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception) {
706 if (JvmtiExport::can_post_on_exceptions()) {
707 vframeStream vfst(current, true);
708 methodHandle method = methodHandle(current, vfst.method());
709 address bcp = method()->bcp_from(vfst.bci());
710 JvmtiExport::post_exception_throw(current, method(), bcp, h_exception());
711 }
712
713 #if INCLUDE_JVMCI
714 if (EnableJVMCI) {
715 vframeStream vfst(current, true);
716 methodHandle method = methodHandle(current, vfst.method());
717 int bci = vfst.bci();
718 MethodData* trap_mdo = method->method_data();
719 if (trap_mdo != nullptr) {
720 // Set exception_seen if the exceptional bytecode is an invoke
721 Bytecode_invoke call = Bytecode_invoke_check(method, bci);
722 if (call.is_valid()) {
723 ResourceMark rm(current);
724
725 // Lock to read ProfileData, and ensure lock is not broken by a safepoint
726 MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
727
728 ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr);
729 if (pdata != nullptr && pdata->is_BitData()) {
730 BitData* bit_data = (BitData*) pdata;
731 bit_data->set_exception_seen();
732 }
733 }
734 }
735 }
736 #endif
737
738 Exceptions::_throw(current, __FILE__, __LINE__, h_exception);
739 }
740
741 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message) {
742 Handle h_exception = Exceptions::new_exception(current, name, message);
743 throw_and_post_jvmti_exception(current, h_exception);
744 }
745
746 #if INCLUDE_JVMTI
747 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_start(oopDesc* vt, jboolean hide, JavaThread* current))
748 assert(hide == JNI_FALSE, "must be VTMS transition finish");
749 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
750 JvmtiVTMSTransitionDisabler::VTMS_vthread_start(vthread);
751 JNIHandles::destroy_local(vthread);
752 JRT_END
753
754 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_end(oopDesc* vt, jboolean hide, JavaThread* current))
755 assert(hide == JNI_TRUE, "must be VTMS transition start");
756 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
757 JvmtiVTMSTransitionDisabler::VTMS_vthread_end(vthread);
758 JNIHandles::destroy_local(vthread);
759 JRT_END
760
761 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_mount(oopDesc* vt, jboolean hide, JavaThread* current))
762 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
763 JvmtiVTMSTransitionDisabler::VTMS_vthread_mount(vthread, hide);
764 JNIHandles::destroy_local(vthread);
765 JRT_END
766
767 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_unmount(oopDesc* vt, jboolean hide, JavaThread* current))
768 jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
769 JvmtiVTMSTransitionDisabler::VTMS_vthread_unmount(vthread, hide);
770 JNIHandles::destroy_local(vthread);
771 JRT_END
772 #endif // INCLUDE_JVMTI
773
// The interpreter code that calls this tracing function is only
// called/generated when UL (unified logging) is enabled for the redefine+class
// tags at the appropriate level. Since obsolete methods are never compiled, we
// don't have to modify the compilers to generate calls to this function.
778 //
779 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
780 JavaThread* thread, Method* method))
781 if (method->is_obsolete()) {
782 // We are calling an obsolete method, but this is not necessarily
783 // an error. Our method could have been redefined just after we
784 // fetched the Method* from the constant pool.
785 ResourceMark rm;
786 log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
787 }
788 return 0;
789 JRT_END
790
// ret_pc points into the caller; we are returning the caller's exception handler
// for the given exception.
// Note that the implementation of this method assumes it's only called when an
// exception has actually occurred.
794 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
795 bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
796 assert(nm != nullptr, "must exist");
797 ResourceMark rm;
798
799 #if INCLUDE_JVMCI
800 if (nm->is_compiled_by_jvmci()) {
801 // lookup exception handler for this pc
802 int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
803 ExceptionHandlerTable table(nm);
804 HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
805 if (t != nullptr) {
806 return nm->code_begin() + t->pco();
807 } else {
808 bool make_not_entrant = true;
809 return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
810 }
811 }
812 #endif // INCLUDE_JVMCI
813
814 ScopeDesc* sd = nm->scope_desc_at(ret_pc);
815 // determine handler bci, if any
816 EXCEPTION_MARK;
817
818 int handler_bci = -1;
819 int scope_depth = 0;
820 if (!force_unwind) {
821 int bci = sd->bci();
822 bool recursive_exception = false;
823 do {
824 bool skip_scope_increment = false;
825 // exception handler lookup
826 Klass* ek = exception->klass();
827 methodHandle mh(THREAD, sd->method());
828 handler_bci = Method::fast_exception_handler_bci_for(mh, ek, bci, THREAD);
829 if (HAS_PENDING_EXCEPTION) {
830 recursive_exception = true;
831 // We threw an exception while trying to find the exception handler.
832 // Transfer the new exception to the exception handle which will
833 // be set into thread local storage, and do another lookup for an
834 // exception handler for this exception, this time starting at the
835 // BCI of the exception handler which caused the exception to be
836 // thrown (bugs 4307310 and 4546590). Set "exception" reference
837 // argument to ensure that the correct exception is thrown (4870175).
838 recursive_exception_occurred = true;
839 exception = Handle(THREAD, PENDING_EXCEPTION);
840 CLEAR_PENDING_EXCEPTION;
841 if (handler_bci >= 0) {
842 bci = handler_bci;
843 handler_bci = -1;
844 skip_scope_increment = true;
845 }
846 }
847 else {
848 recursive_exception = false;
849 }
850 if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
851 sd = sd->sender();
852 if (sd != nullptr) {
853 bci = sd->bci();
854 }
855 ++scope_depth;
856 }
857 } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != nullptr));
858 }
859
860 // found handling method => lookup exception handler
861 int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
862
863 ExceptionHandlerTable table(nm);
864 HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
865
866 // If the compiler did not anticipate a recursive exception, resulting in an exception
867 // thrown from the catch bci, then the compiled exception handler might be missing.
868 // This is rare. Just deoptimize and let the interpreter handle it.
869 if (t == nullptr && recursive_exception_occurred) {
870 bool make_not_entrant = false;
871 return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
872 }
873
874 if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) {
875 // Allow abbreviated catch tables. The idea is to allow a method
876 // to materialize its exceptions without committing to the exact
877 // routing of exceptions. In particular this is needed for adding
878 // a synthetic handler to unlock monitors when inlining
879 // synchronized methods since the unlock path isn't represented in
880 // the bytecodes.
881 t = table.entry_for(catch_pco, -1, 0);
882 }
883
884 #ifdef COMPILER1
885 if (t == nullptr && nm->is_compiled_by_c1()) {
886 assert(nm->unwind_handler_begin() != nullptr, "");
887 return nm->unwind_handler_begin();
888 }
889 #endif
890
891 if (t == nullptr) {
892 ttyLocker ttyl;
893 tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
894 tty->print_cr(" Exception:");
895 exception->print();
896 tty->cr();
897 tty->print_cr(" Compiled exception table :");
898 table.print();
899 nm->print();
900 nm->print_code();
901 guarantee(false, "missing exception handler");
902 return nullptr;
903 }
904
905 if (handler_bci != -1) { // did we find a handler in this method?
906 sd->method()->set_exception_handler_entered(handler_bci); // profile
907 }
908 return nm->code_begin() + t->pco();
909 }
910
911 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
912 // These errors occur only at call sites
913 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
914 JRT_END
915
916 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
917 // These errors occur only at call sites
918 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
919 JRT_END
920
921 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
922 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
923 JRT_END
924
925 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* current))
926 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
927 JRT_END
928
929 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current))
930 // This entry point is effectively only used for NullPointerExceptions which occur at inline
931 // cache sites (when the callee activation is not yet set up) so we are at a call site
932 throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
933 JRT_END
934
935 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* current))
936 throw_StackOverflowError_common(current, false);
937 JRT_END
938
939 JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* current))
940 throw_StackOverflowError_common(current, true);
941 JRT_END
942
943 void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool delayed) {
944 // We avoid using the normal exception construction in this case because
945 // it performs an upcall to Java, and we're already out of stack space.
946 JavaThread* THREAD = current; // For exception macros.
947 InstanceKlass* k = vmClasses::StackOverflowError_klass();
948 oop exception_oop = k->allocate_instance(CHECK);
949 if (delayed) {
950 java_lang_Throwable::set_message(exception_oop,
951 Universe::delayed_stack_overflow_error_message());
952 }
953 Handle exception (current, exception_oop);
954 if (StackTraceInThrowable) {
955 java_lang_Throwable::fill_in_stack_trace(exception);
956 }
957 // Remove the ScopedValue bindings in case we got a
958 // StackOverflowError while we were trying to remove ScopedValue
959 // bindings.
960 current->clear_scopedValueBindings();
961 // Increment counter for hs_err file reporting
962 AtomicAccess::inc(&Exceptions::_stack_overflow_errors);
963 throw_and_post_jvmti_exception(current, exception);
964 }
965
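// Computes the continuation address for an implicit exception (null check,
// division by zero, stack overflow) raised at 'pc', or returns nullptr to let
// the signal handler report the fault as a crash.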
966 address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
967 address pc,
968 ImplicitExceptionKind exception_kind)
969 {
970 address target_pc = nullptr;
971
972 if (Interpreter::contains(pc)) {
973 switch (exception_kind) {
974 case IMPLICIT_NULL: return Interpreter::throw_NullPointerException_entry();
975 case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
976 case STACK_OVERFLOW: return Interpreter::throw_StackOverflowError_entry();
977 default: ShouldNotReachHere();
978 }
979 } else {
980 switch (exception_kind) {
981 case STACK_OVERFLOW: {
982 // Stack overflow only occurs upon frame setup; the callee is
983 // going to be unwound. Dispatch to a shared runtime stub
984 // which will cause the StackOverflowError to be fabricated
985 // and processed.
986 // Stack overflow should never occur during deoptimization:
987 // the compiled method bangs the stack by as much as the
988 // interpreter would need in case of a deoptimization. The
989 // deoptimization blob and uncommon trap blob bang the stack
990 // in a debug VM to verify the correctness of the compiled
991 // method stack banging.
992 assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap");
993 Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
994 return SharedRuntime::throw_StackOverflowError_entry();
995 }
996
997 case IMPLICIT_NULL: {
998 if (VtableStubs::contains(pc)) {
999 // We haven't yet entered the callee frame. Fabricate an
1000 // exception and begin dispatching it in the caller. Since
1001 // the caller was at a call site, it's safe to destroy all
1002 // caller-saved registers, as these entry points do.
1003 VtableStub* vt_stub = VtableStubs::stub_containing(pc);
1004
1005 // If vt_stub is null, then return null to signal handler to report the SEGV error.
1006 if (vt_stub == nullptr) return nullptr;
1007
1008 if (vt_stub->is_abstract_method_error(pc)) {
1009 assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
1010 Events::log_exception(current, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc));
1011 // Instead of throwing the abstract method error here directly, we re-resolve
1012 // and will throw the AbstractMethodError during resolve. As a result, we'll
1013 // get a more detailed error message.
1014 return SharedRuntime::get_handle_wrong_method_stub();
1015 } else {
1016 Events::log_exception(current, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc));
1017 // Assert that the signal comes from the expected location in stub code.
1018 assert(vt_stub->is_null_pointer_exception(pc),
1019 "obtained signal from unexpected location in stub code");
1020 return SharedRuntime::throw_NullPointerException_at_call_entry();
1021 }
1022 } else {
1023 CodeBlob* cb = CodeCache::find_blob(pc);
1024
1025 // If code blob is null, then return null to signal handler to report the SEGV error.
1026 if (cb == nullptr) return nullptr;
1027
1028 // Exception happened in CodeCache. Must be either:
1029 // 1. Inline-cache check in C2I handler blob,
1030 // 2. Inline-cache check in nmethod, or
1031 // 3. Implicit null exception in nmethod
1032
1033 if (!cb->is_nmethod()) {
1034 bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
1035 if (!is_in_blob) {
1036 // Allow normal crash reporting to handle this
1037 return nullptr;
1038 }
1039 Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
1040 // There is no handler here, so we will simply unwind.
1041 return SharedRuntime::throw_NullPointerException_at_call_entry();
1042 }
1043
1044 // Otherwise, it's a compiled method. Consult its exception handlers.
1045 nmethod* nm = cb->as_nmethod();
1046 if (nm->inlinecache_check_contains(pc)) {
1047 // exception happened inside inline-cache check code
1048 // => the nmethod is not yet active (i.e., the frame
1049 // is not set up yet) => use return address pushed by
1050 // caller => don't push another return address
1051 Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc));
1052 return SharedRuntime::throw_NullPointerException_at_call_entry();
1053 }
1054
1055 if (nm->method()->is_method_handle_intrinsic()) {
1056 // exception happened inside MH dispatch code, similar to a vtable stub
1057 Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
1058 return SharedRuntime::throw_NullPointerException_at_call_entry();
1059 }
1060
1061 #ifndef PRODUCT
1062 _implicit_null_throws++;
1063 #endif
1064 target_pc = nm->continuation_for_implicit_null_exception(pc);
1065 // If there's an unexpected fault, target_pc might be null,
1066 // in which case we want to fall through into the normal
1067 // error handling code.
1068 }
1069
1070 break; // fall through
1071 }
1072
1073
1074 case IMPLICIT_DIVIDE_BY_ZERO: {
1075 nmethod* nm = CodeCache::find_nmethod(pc);
1076 guarantee(nm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions");
1077 #ifndef PRODUCT
1078 _implicit_div0_throws++;
1079 #endif
1080 target_pc = nm->continuation_for_implicit_div0_exception(pc);
1081 // If there's an unexpected fault, target_pc might be null,
1082 // in which case we want to fall through into the normal
1083 // error handling code.
1084 break; // fall through
1085 }
1086
1087 default: ShouldNotReachHere();
1088 }
1089
1090 assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
1091
1092 if (exception_kind == IMPLICIT_NULL) {
1093 #ifndef PRODUCT
1094 // for AbortVMOnException flag
1095 Exceptions::debug_check_abort("java.lang.NullPointerException");
1096 #endif //PRODUCT
1097 Events::log_exception(current, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1098 } else {
1099 #ifndef PRODUCT
1100 // for AbortVMOnException flag
1101 Exceptions::debug_check_abort("java.lang.ArithmeticException");
1102 #endif //PRODUCT
1103 Events::log_exception(current, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1104 }
1105 return target_pc;
1106 }
1107
1108 ShouldNotReachHere();
1109 return nullptr;
1110 }
1111
1112
1113 /**
 * Throws a java/lang/UnsatisfiedLinkError. The address of this method is
1115 * installed in the native function entry of all native Java methods before
1116 * they get linked to their actual native methods.
1117 *
1118 * \note
 * This method actually never gets called! The reason is that
 * the interpreter's native entries call NativeLookup::lookup(), which
1121 * throws the exception when the lookup fails. The exception is then
1122 * caught and forwarded on the return from NativeLookup::lookup() call
1123 * before the call to the native function. This might change in the future.
1124 */
1125 JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
1126 {
1127 // We return a bad value here to make sure that the exception is
1128 // forwarded before we look at the return value.
1129 THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
1130 }
1131 JNI_END
1132
1133 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
1134 return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
1135 }
1136
1137 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
1138 #if INCLUDE_JVMCI
1139 if (!obj->klass()->has_finalizer()) {
1140 return;
1141 }
1142 #endif // INCLUDE_JVMCI
1143 assert(oopDesc::is_oop(obj), "must be a valid oop");
1144 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1145 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1146 JRT_END
1147
1148 jlong SharedRuntime::get_java_tid(JavaThread* thread) {
1149 assert(thread != nullptr, "No thread");
1150 if (thread == nullptr) {
1151 return 0;
1152 }
1153 guarantee(Thread::current() != thread || thread->is_oop_safe(),
1154 "current cannot touch oops after its GC barrier is detached.");
1155 oop obj = thread->threadObj();
1156 return (obj == nullptr) ? 0 : java_lang_Thread::thread_id(obj);
1157 }
1158
1159 /**
1160 * This function ought to be a void function, but cannot be because
1161 * it gets turned into a tail-call on sparc, which runs into dtrace bug
1162 * 6254741. Once that is fixed we can remove the dummy return value.
1163 */
1164 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
1165 return dtrace_object_alloc(JavaThread::current(), o, o->size());
1166 }
1167
1168 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o) {
1169 return dtrace_object_alloc(thread, o, o->size());
1170 }
1171
1172 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size) {
1173 assert(DTraceAllocProbes, "wrong call");
1174 Klass* klass = o->klass();
1175 Symbol* name = klass->name();
1176 HOTSPOT_OBJECT_ALLOC(
1177 get_java_tid(thread),
1178 (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
1179 return 0;
1180 }
1181
1182 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
1183 JavaThread* current, Method* method))
1184 assert(current == JavaThread::current(), "pre-condition");
1185
1186 assert(DTraceMethodProbes, "wrong call");
1187 Symbol* kname = method->klass_name();
1188 Symbol* name = method->name();
1189 Symbol* sig = method->signature();
1190 HOTSPOT_METHOD_ENTRY(
1191 get_java_tid(current),
1192 (char *) kname->bytes(), kname->utf8_length(),
1193 (char *) name->bytes(), name->utf8_length(),
1194 (char *) sig->bytes(), sig->utf8_length());
1195 return 0;
1196 JRT_END
1197
1198 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
1199 JavaThread* current, Method* method))
1200 assert(current == JavaThread::current(), "pre-condition");
1201 assert(DTraceMethodProbes, "wrong call");
1202 Symbol* kname = method->klass_name();
1203 Symbol* name = method->name();
1204 Symbol* sig = method->signature();
1205 HOTSPOT_METHOD_RETURN(
1206 get_java_tid(current),
1207 (char *) kname->bytes(), kname->utf8_length(),
1208 (char *) name->bytes(), name->utf8_length(),
1209 (char *) sig->bytes(), sig->utf8_length());
1210 return 0;
1211 JRT_END
1212
1213
// Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on the
// stack but the callee has not been invoked yet. Used by: resolve
// virtual/static calls, vtable updates, etc. Caller frame must be compiled.
1218 Handle SharedRuntime::find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
1219 JavaThread* current = THREAD;
1220 ResourceMark rm(current);
1221
1222 // last java frame on stack (which includes native call frames)
  vframeStream vfst(current, true);  // Do not skip any javaCalls
1224
1225 return find_callee_info_helper(vfst, bc, callinfo, THREAD);
1226 }
1227
1228 Method* SharedRuntime::extract_attached_method(vframeStream& vfst) {
1229 nmethod* caller = vfst.nm();
1230
1231 address pc = vfst.frame_pc();
1232 { // Get call instruction under lock because another thread may be busy patching it.
1233 CompiledICLocker ic_locker(caller);
1234 return caller->attached_method_before_pc(pc);
1235 }
1236 return nullptr;
1237 }
1238
// Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on the
// stack but the callee has not been invoked yet. Caller frame must be compiled.
1242 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1243 CallInfo& callinfo, TRAPS) {
1244 Handle receiver;
1245 Handle nullHandle; // create a handy null handle for exception returns
1246 JavaThread* current = THREAD;
1247
1248 assert(!vfst.at_end(), "Java frame must exist");
1249
1250 // Find caller and bci from vframe
1251 methodHandle caller(current, vfst.method());
1252 int bci = vfst.bci();
1253
1254 if (caller->is_continuation_enter_intrinsic()) {
1255 bc = Bytecodes::_invokestatic;
1256 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1257 return receiver;
1258 }
1259
  // Substitutability test implementation piggybacks on static call resolution
1261 Bytecodes::Code code = caller->java_code_at(bci);
1262 if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1263 bc = Bytecodes::_invokestatic;
1264 methodHandle attached_method(THREAD, extract_attached_method(vfst));
1265 assert(attached_method.not_null(), "must have attached method");
1266 vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1267 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1268 #ifdef ASSERT
1269 Symbol* subst_method_name = UseAltSubstitutabilityMethod ? vmSymbols::isSubstitutableAlt_name() : vmSymbols::isSubstitutable_name();
1270 Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(subst_method_name, vmSymbols::object_object_boolean_signature());
1271 assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1272 #endif
1273 return receiver;
1274 }
1275
1276 Bytecode_invoke bytecode(caller, bci);
1277 int bytecode_index = bytecode.index();
1278 bc = bytecode.invoke_code();
1279
1280 methodHandle attached_method(current, extract_attached_method(vfst));
1281 if (attached_method.not_null()) {
1282 Method* callee = bytecode.static_target(CHECK_NH);
1283 vmIntrinsics::ID id = callee->intrinsic_id();
    // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual
    // call, it attaches the statically resolved method to the call site.
1286 if (MethodHandles::is_signature_polymorphic(id) &&
1287 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1288 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1289
1290 // Adjust invocation mode according to the attached method.
1291 switch (bc) {
1292 case Bytecodes::_invokevirtual:
1293 if (attached_method->method_holder()->is_interface()) {
1294 bc = Bytecodes::_invokeinterface;
1295 }
1296 break;
1297 case Bytecodes::_invokeinterface:
1298 if (!attached_method->method_holder()->is_interface()) {
1299 bc = Bytecodes::_invokevirtual;
1300 }
1301 break;
1302 case Bytecodes::_invokehandle:
1303 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1304 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1305 : Bytecodes::_invokevirtual;
1306 }
1307 break;
1308 default:
1309 break;
1310 }
1311 } else {
1312 assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1313 if (!attached_method->method_holder()->is_inline_klass()) {
1314 // Ignore the attached method in this case to not confuse below code
1315 attached_method = methodHandle(current, nullptr);
1316 }
1317 }
1318 }
1319
1320 assert(bc != Bytecodes::_illegal, "not initialized");
1321
1322 bool has_receiver = bc != Bytecodes::_invokestatic &&
1323 bc != Bytecodes::_invokedynamic &&
1324 bc != Bytecodes::_invokehandle;
1325 bool check_null_and_abstract = true;
1326
1327 // Find receiver for non-static call
1328 if (has_receiver) {
    // This register map must be updated since we need to find the receiver for
    // compiled frames. The receiver might be in a register.
1331 RegisterMap reg_map2(current,
1332 RegisterMap::UpdateMap::include,
1333 RegisterMap::ProcessFrames::include,
1334 RegisterMap::WalkContinuation::skip);
1335 frame stubFrame = current->last_frame();
1336 // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);
1338
1339 Method* callee = attached_method();
1340 if (callee == nullptr) {
1341 callee = bytecode.static_target(CHECK_NH);
1342 if (callee == nullptr) {
1343 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1344 }
1345 }
1346 bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->as_nmethod()->is_compiled_by_c1();
1347 if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1348 // If the receiver is an inline type that is passed as fields, no oop is available
1349 // Resolve the call without receiver null checking.
1350 assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1351 assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1352 if (bc == Bytecodes::_invokeinterface) {
1353 bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1354 }
1355 check_null_and_abstract = false;
1356 } else {
1357 // Retrieve from a compiled argument list
      receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1359 assert(oopDesc::is_oop_or_null(receiver()), "");
1360 if (receiver.is_null()) {
1361 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1362 }
1363 }
1364 }
1365
1366 // Resolve method
1367 if (attached_method.not_null()) {
1368 // Parameterized by attached method.
1369 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1370 } else {
1371 // Parameterized by bytecode.
1372 constantPoolHandle constants(current, caller->constants());
1373 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1374 }
1375
1376 #ifdef ASSERT
1377 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1378 if (has_receiver && check_null_and_abstract) {
1379 assert(receiver.not_null(), "should have thrown exception");
1380 Klass* receiver_klass = receiver->klass();
1381 Klass* rk = nullptr;
1382 if (attached_method.not_null()) {
1383 // In case there's resolved method attached, use its holder during the check.
1384 rk = attached_method->method_holder();
1385 } else {
1386 // Klass is already loaded.
1387 constantPoolHandle constants(current, caller->constants());
1388 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1389 }
1390 Klass* static_receiver_klass = rk;
1391 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1392 "actual receiver must be subclass of static receiver klass");
1393 if (receiver_klass->is_instance_klass()) {
1394 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1395 tty->print_cr("ERROR: Klass not yet initialized!!");
1396 receiver_klass->print();
1397 }
1398 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1399 }
1400 }
1401 #endif
1402
1403 return receiver;
1404 }
1405
1406 methodHandle SharedRuntime::find_callee_method(bool& caller_does_not_scalarize, TRAPS) {
1407 JavaThread* current = THREAD;
1408 ResourceMark rm(current);
1409 // We need first to check if any Java activations (compiled, interpreted)
1410 // exist on the stack since last JavaCall. If not, we need
1411 // to get the target method from the JavaCall wrapper.
1412 vframeStream vfst(current, true); // Do not skip any javaCalls
1413 methodHandle callee_method;
1414 if (vfst.at_end()) {
1415 // No Java frames were found on stack since we did the JavaCall.
1416 // Hence the stack can only contain an entry_frame. We need to
1417 // find the target method from the stub frame.
1418 RegisterMap reg_map(current,
1419 RegisterMap::UpdateMap::skip,
1420 RegisterMap::ProcessFrames::include,
1421 RegisterMap::WalkContinuation::skip);
1422 frame fr = current->last_frame();
1423 assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
1425 assert(fr.is_entry_frame(), "must be");
1426 // fr is now pointing to the entry frame.
1427 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1428 } else {
1429 Bytecodes::Code bc;
1430 CallInfo callinfo;
1431 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1432 // Calls via mismatching methods are always non-scalarized
1433 if (callinfo.resolved_method()->mismatch()) {
1434 caller_does_not_scalarize = true;
1435 }
1436 callee_method = methodHandle(current, callinfo.selected_method());
1437 }
1438 assert(callee_method()->is_method(), "must be");
1439 return callee_method;
1440 }
1441
1442 // Resolves a call.
1443 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1444 JavaThread* current = THREAD;
1445 ResourceMark rm(current);
1446 RegisterMap cbl_map(current,
1447 RegisterMap::UpdateMap::skip,
1448 RegisterMap::ProcessFrames::include,
1449 RegisterMap::WalkContinuation::skip);
1450 frame caller_frame = current->last_frame().sender(&cbl_map);
1451
1452 CodeBlob* caller_cb = caller_frame.cb();
1453 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1454 nmethod* caller_nm = caller_cb->as_nmethod();
1455
1456 // determine call info & receiver
1457 // note: a) receiver is null for static calls
1458 // b) an exception is thrown if receiver is null for non-static calls
1459 CallInfo call_info;
1460 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1461 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1462
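  // From here on we must not safepoint: the call info, receiver and caller nmethod
  // gathered above have to stay valid until the call site has been patched below.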
1463 NoSafepointVerifier nsv;
1464
1465 methodHandle callee_method(current, call_info.selected_method());
1466 // Calls via mismatching methods are always non-scalarized
1467 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1468 caller_does_not_scalarize = true;
1469 }
1470
1471 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1472 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1473 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1474 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1475 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1476
1477 assert(!caller_nm->is_unloading(), "It should not be unloading");
1478
1479 #ifndef PRODUCT
1480 // tracing/debugging/statistics
1481 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1482 (is_virtual) ? (&_resolve_virtual_ctr) :
1483 (&_resolve_static_ctr);
1484 AtomicAccess::inc(addr);
1485
1486 if (TraceCallFixup) {
1487 ResourceMark rm(current);
1488 tty->print("resolving %s%s (%s) %s call to",
1489 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1490 Bytecodes::name(invoke_code), (caller_does_not_scalarize) ? "non-scalar" : "");
1491 callee_method->print_short_name(tty);
1492 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1493 p2i(caller_frame.pc()), p2i(callee_method->code()));
1494 }
1495 #endif
1496
1497 if (invoke_code == Bytecodes::_invokestatic) {
1498 assert(callee_method->method_holder()->is_initialized() ||
1499 callee_method->method_holder()->is_reentrant_initialization(current),
1500 "invalid class initialization state for invoke_static");
1501 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1502 // In order to keep class initialization check, do not patch call
1503 // site for static call when the class is not fully initialized.
1504 // Proper check is enforced by call site re-resolution on every invocation.
1505 //
1506 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1507 // explicit class initialization check is put in nmethod entry (VEP).
1508 assert(callee_method->method_holder()->is_linked(), "must be");
1509 return callee_method;
1510 }
1511 }
1512
1513
1514 // JSR 292 key invariant:
1515 // If the resolved method is a MethodHandle invoke target, the call
1516 // site must be a MethodHandle call site, because the lambda form might tail-call
1517 // leaving the stack in a state unknown to either caller or callee
1518
1519 // Compute entry points. The computation of the entry points is independent of
1520 // patching the call.
1521
1522 // Make sure the callee nmethod does not get deoptimized and removed before
1523 // we are done patching the code.
1524
1525
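  // Patch the call site while holding the CompiledICLocker: non-optimized virtual
  // calls go through an inline cache, whereas static and optimized virtual calls
  // are direct calls bound straight to the resolved callee.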
1526 CompiledICLocker ml(caller_nm);
1527 if (is_virtual && !is_optimized) {
1528 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1529 inline_cache->update(&call_info, receiver->klass(), caller_does_not_scalarize);
1530 } else {
1531 // Callsite is a direct call - set it to the destination method
1532 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1533 callsite->set(callee_method, caller_does_not_scalarize);
1534 }
1535
1536 return callee_method;
1537 }
1538
1539 // Inline caches exist only in compiled code
1540 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1541 #ifdef ASSERT
1542 RegisterMap reg_map(current,
1543 RegisterMap::UpdateMap::skip,
1544 RegisterMap::ProcessFrames::include,
1545 RegisterMap::WalkContinuation::skip);
1546 frame stub_frame = current->last_frame();
1547 assert(stub_frame.is_runtime_frame(), "sanity check");
1548 frame caller_frame = stub_frame.sender(&reg_map);
1549 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1550 #endif /* ASSERT */
1551
1552 methodHandle callee_method;
1553 bool caller_does_not_scalarize = false;
1554 JRT_BLOCK
1555 callee_method = SharedRuntime::handle_ic_miss_helper(caller_does_not_scalarize, CHECK_NULL);
1556 // Return Method* through TLS
1557 current->set_vm_result_metadata(callee_method());
1558 JRT_BLOCK_END
1559 // return compiled code entry point after potential safepoints
1560 return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
1561 JRT_END
1562
1563
1564 // Handle call site that has been made non-entrant
1565 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1566 // 6243940 We might end up in here if the callee is deoptimized
1567 // as we race to call it. We don't want to take a safepoint if
1568 // the caller was interpreted because the caller frame will look
1569 // interpreted to the stack walkers and arguments are now
1570 // "compiled" so it is much better to make this transition
1571 // invisible to the stack walking code. The i2c path will
1572 // place the callee method in the callee_target. It is stashed
1573 // there because if we tried to find the callee by normal means a
1574 // safepoint would be possible and we would have trouble gc'ing the compiled args.
1575 RegisterMap reg_map(current,
1576 RegisterMap::UpdateMap::skip,
1577 RegisterMap::ProcessFrames::include,
1578 RegisterMap::WalkContinuation::skip);
1579 frame stub_frame = current->last_frame();
1580 assert(stub_frame.is_runtime_frame(), "sanity check");
1581 frame caller_frame = stub_frame.sender(&reg_map);
1582
1583 if (caller_frame.is_interpreted_frame() ||
1584 caller_frame.is_entry_frame() ||
1585 caller_frame.is_upcall_stub_frame()) {
1586 Method* callee = current->callee_target();
1587 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1588 current->set_vm_result_metadata(callee);
1589 current->set_callee_target(nullptr);
1590 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1591 // Bypass class initialization checks in c2i when caller is in native.
1592 // JNI calls to static methods don't have class initialization checks.
1593 // Fast class initialization checks are present in c2i adapters and call into
1594 // SharedRuntime::handle_wrong_method() on the slow path.
1595 //
1596 // JVM upcalls may land here as well, but there's a proper check present in
1597 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1598 // so bypassing it in c2i adapter is benign.
1599 return callee->get_c2i_no_clinit_check_entry();
1600 } else {
1601 if (caller_frame.is_interpreted_frame()) {
1602 return callee->get_c2i_inline_entry();
1603 } else {
1604 return callee->get_c2i_entry();
1605 }
1606 }
1607 }
1608
1609 // Must be the compiled-to-compiled path, which is safe to stackwalk
1610 methodHandle callee_method;
1611 bool is_static_call = false;
1612 bool is_optimized = false;
1613 bool caller_does_not_scalarize = false;
1614 JRT_BLOCK
1615 // Force resolving of caller (if we called from compiled frame)
1616 callee_method = SharedRuntime::reresolve_call_site(is_optimized, caller_does_not_scalarize, CHECK_NULL);
1617 current->set_vm_result_metadata(callee_method());
1618 JRT_BLOCK_END
1619 // return compiled code entry point after potential safepoints
1620 return get_resolved_entry(current, callee_method, callee_method->is_static(), is_optimized, caller_does_not_scalarize);
1621 JRT_END
1622
1623 // Handle abstract method call
1624 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1625 // Verbose error message for AbstractMethodError.
1626 // Get the called method from the invoke bytecode.
1627 vframeStream vfst(current, true);
1628 assert(!vfst.at_end(), "Java frame must exist");
1629 methodHandle caller(current, vfst.method());
1630 Bytecode_invoke invoke(caller, vfst.bci());
1631 DEBUG_ONLY( invoke.verify(); )
1632
1633 // Find the compiled caller frame.
1634 RegisterMap reg_map(current,
1635 RegisterMap::UpdateMap::include,
1636 RegisterMap::ProcessFrames::include,
1637 RegisterMap::WalkContinuation::skip);
1638 frame stubFrame = current->last_frame();
1639 assert(stubFrame.is_runtime_frame(), "must be");
1640 frame callerFrame = stubFrame.sender(&reg_map);
1641 assert(callerFrame.is_compiled_frame(), "must be");
1642
1643 // Install exception and return forward entry.
1644 address res = SharedRuntime::throw_AbstractMethodError_entry();
1645 JRT_BLOCK
1646 methodHandle callee(current, invoke.static_target(current));
1647 if (!callee.is_null()) {
1648 oop recv = callerFrame.retrieve_receiver(&reg_map);
1649 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1650 res = StubRoutines::forward_exception_entry();
1651 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1652 }
1653 JRT_BLOCK_END
1654 return res;
1655 JRT_END
1656
1657 // Return the appropriate verified code entry if interp_only_mode is not set for the
1658 // current thread; otherwise return the c2i entry.
1659 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method,
1660 bool is_static_call, bool is_optimized, bool caller_does_not_scalarize) {
1661 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1662 // In interp_only_mode we need to go to the interpreted entry
1663 // The c2i won't patch in this mode -- see fixup_callers_callsite
1664 return callee_method->get_c2i_entry();
1665 }
1666
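  // Pick the entry point matching the caller's calling convention:
  // - callers that do not scalarize inline type arguments (e.g. C1-compiled callers)
  //   jump to the verified inline entry, which takes all arguments as oops,
  // - static and optimized virtual calls use the fully scalarized verified entry,
  // - other (dispatched) virtual calls use the "RO" variant, where the receiver is
  //   still passed as an oop.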
1667 if (caller_does_not_scalarize) {
1668 assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
1669 return callee_method->verified_inline_code_entry();
1670 } else if (is_static_call || is_optimized) {
1671 assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
1672 return callee_method->verified_code_entry();
1673 } else {
1674 assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
1675 return callee_method->verified_inline_ro_code_entry();
1676 }
1677 }
1678
1679 // resolve a static call and patch code
1680 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1681 methodHandle callee_method;
1682 bool caller_does_not_scalarize = false;
1683 bool enter_special = false;
1684 JRT_BLOCK
1685 callee_method = SharedRuntime::resolve_helper(false, false, caller_does_not_scalarize, CHECK_NULL);
1686 current->set_vm_result_metadata(callee_method());
1687 JRT_BLOCK_END
1688 // return compiled code entry point after potential safepoints
1689 return get_resolved_entry(current, callee_method, true, false, caller_does_not_scalarize);
1690 JRT_END
1691
1692 // resolve virtual call and update inline cache to monomorphic
1693 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1694 methodHandle callee_method;
1695 bool caller_does_not_scalarize = false;
1696 JRT_BLOCK
1697 callee_method = SharedRuntime::resolve_helper(true, false, caller_does_not_scalarize, CHECK_NULL);
1698 current->set_vm_result_metadata(callee_method());
1699 JRT_BLOCK_END
1700 // return compiled code entry point after potential safepoints
1701 return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
1702 JRT_END
1703
1704
1705 // Resolve a virtual call that can be statically bound (e.g., always
1706 // monomorphic, so it has no inline cache). Patch code to resolved target.
1707 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1708 methodHandle callee_method;
1709 bool caller_does_not_scalarize = false;
1710 JRT_BLOCK
1711 callee_method = SharedRuntime::resolve_helper(true, true, caller_does_not_scalarize, CHECK_NULL);
1712 current->set_vm_result_metadata(callee_method());
1713 JRT_BLOCK_END
1714 // return compiled code entry point after potential safepoints
1715 return get_resolved_entry(current, callee_method, false, true, caller_does_not_scalarize);
1716 JRT_END
1717
1718
1719
1720 methodHandle SharedRuntime::handle_ic_miss_helper(bool& caller_does_not_scalarize, TRAPS) {
1721 JavaThread* current = THREAD;
1722 ResourceMark rm(current);
1723 CallInfo call_info;
1724 Bytecodes::Code bc;
1725
1726 // receiver is null for static calls. An exception is thrown for null
1727 // receivers for non-static calls
1728 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1729
1730 methodHandle callee_method(current, call_info.selected_method());
1731
1732 #ifndef PRODUCT
1733 AtomicAccess::inc(&_ic_miss_ctr);
1734
1735 // Statistics & Tracing
1736 if (TraceCallFixup) {
1737 ResourceMark rm(current);
1738 tty->print("IC miss (%s) %s call to", Bytecodes::name(bc), (caller_does_not_scalarize) ? "non-scalar" : "");
1739 callee_method->print_short_name(tty);
1740 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1741 }
1742
1743 if (ICMissHistogram) {
1744 MutexLocker m(VMStatistic_lock);
1745 RegisterMap reg_map(current,
1746 RegisterMap::UpdateMap::skip,
1747 RegisterMap::ProcessFrames::include,
1748 RegisterMap::WalkContinuation::skip);
1749 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1750 // produce statistics under the lock
1751 trace_ic_miss(f.pc());
1752 }
1753 #endif
1754
1755 // install an event collector so that when a vtable stub is created the
1756 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1757 // event can't be posted when the stub is created as locks are held
1758 // - instead the event will be deferred until the event collector goes
1759 // out of scope.
1760 JvmtiDynamicCodeEventCollector event_collector;
1761
1762 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1763 RegisterMap reg_map(current,
1764 RegisterMap::UpdateMap::skip,
1765 RegisterMap::ProcessFrames::include,
1766 RegisterMap::WalkContinuation::skip);
1767 frame caller_frame = current->last_frame().sender(&reg_map);
1768 CodeBlob* cb = caller_frame.cb();
1769 nmethod* caller_nm = cb->as_nmethod();
1770 // Calls via mismatching methods are always non-scalarized
1771 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1772 caller_does_not_scalarize = true;
1773 }
1774
1775 CompiledICLocker ml(caller_nm);
1776 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1777 inline_cache->update(&call_info, receiver()->klass(), caller_does_not_scalarize);
1778
1779 return callee_method;
1780 }
1781
1782 //
1783 // Resets a call-site in compiled code so it will get resolved again.
1784 // This routine handles virtual call sites, optimized virtual call
1785 // sites, and static call sites. Typically used to change a call site's
1786 // destination from compiled to interpreted.
1787 //
1788 methodHandle SharedRuntime::reresolve_call_site(bool& is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1789 JavaThread* current = THREAD;
1790 ResourceMark rm(current);
1791 RegisterMap reg_map(current,
1792 RegisterMap::UpdateMap::skip,
1793 RegisterMap::ProcessFrames::include,
1794 RegisterMap::WalkContinuation::skip);
1795 frame stub_frame = current->last_frame();
1796 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1797 frame caller = stub_frame.sender(&reg_map);
1798 if (caller.is_compiled_frame()) {
1799 caller_does_not_scalarize = caller.cb()->as_nmethod()->is_compiled_by_c1();
1800 }
1801 assert(!caller.is_interpreted_frame(), "must be compiled");
1802
1803 // If the frame isn't a live compiled frame (i.e. deoptimized by the time we get here), no IC clearing needs to be done
1804 // for the caller. However, when the caller is C2-compiled and the callee is a C1- or C2-compiled method, then we still
1805 // need to figure out whether it was an optimized virtual call with an inline type receiver. Otherwise, we end up
1806 // using the wrong method entry point and accidentally skip the buffering of the receiver.
1807 methodHandle callee_method = find_callee_method(caller_does_not_scalarize, CHECK_(methodHandle()));
1808 const bool caller_is_compiled_and_not_deoptimized = caller.is_compiled_frame() && !caller.is_deoptimized_frame();
1809 const bool caller_is_continuation_enter_intrinsic =
1810 caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic();
1811 const bool do_IC_clearing = caller_is_compiled_and_not_deoptimized || caller_is_continuation_enter_intrinsic;
1812
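  // Even when no IC clearing is needed we may still have to recompute is_optimized:
  // if the callee's compiled code scalarizes the receiver and the caller scalarizes
  // as well, returning the wrong entry point would skip the buffering of the receiver
  // (see the comment above).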
1813 const bool callee_compiled_with_scalarized_receiver = callee_method->has_compiled_code() &&
1814 !callee_method()->is_static() &&
1815 callee_method()->is_scalarized_arg(0);
1816 const bool compute_is_optimized = !caller_does_not_scalarize && callee_compiled_with_scalarized_receiver;
1817
1818 if (do_IC_clearing || compute_is_optimized) {
1819 address pc = caller.pc();
1820
1821 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1822 assert(caller_nm != nullptr, "did not find caller nmethod");
1823
1824 // Default call_addr is the location of the "basic" call.
1825 // Determine the address of the call we are re-resolving. With
1826 // Inline Caches we will always find a recognizable call.
1827 // With Inline Caches disabled we may or may not find a
1828 // recognizable call. We will always find a call for static
1829 // calls and for optimized virtual calls. For vanilla virtual
1830 // calls it depends on the state of the UseInlineCaches switch.
1831 //
1832 // With Inline Caches disabled we can get here for a virtual call
1833 // for two reasons:
1834 // 1 - calling an abstract method. The vtable for abstract methods
1835 // will run us thru handle_wrong_method and we will eventually
1836 // end up in the interpreter to throw the AbstractMethodError.
1837 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1838 // call and between the time we fetch the entry address and
1839 // we jump to it the target gets deoptimized. Similar to 1
1840 // we will wind up in the interpreter (thru a c2i with c2).
1841 //
1842 CompiledICLocker ml(caller_nm);
1843 address call_addr = caller_nm->call_instruction_address(pc);
1844
1845 if (call_addr != nullptr) {
1846 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1847 // bytes back in the instruction stream so we must also check for reloc info.
1848 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1849 bool ret = iter.next(); // Get item
1850 if (ret) {
1851 is_optimized = false;
1852 switch (iter.type()) {
1853 case relocInfo::static_call_type:
1854 assert(callee_method->is_static(), "must be");
1855 case relocInfo::opt_virtual_call_type: {
1856 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1857 if (do_IC_clearing) {
1858 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1859 cdc->set_to_clean();
1860 }
1861 break;
1862 }
1863 case relocInfo::virtual_call_type: {
1864 if (do_IC_clearing) {
1865 // compiled, dispatched call (which used to call an interpreted method)
1866 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1867 inline_cache->set_to_clean();
1868 }
1869 break;
1870 }
1871 default:
1872 break;
1873 }
1874 }
1875 }
1876 }
1877
1878 #ifndef PRODUCT
1879 AtomicAccess::inc(&_wrong_method_ctr);
1880
1881 if (TraceCallFixup) {
1882 ResourceMark rm(current);
1883 tty->print("handle_wrong_method reresolving %s call to", (caller_does_not_scalarize) ? "non-scalar" : "");
1884 callee_method->print_short_name(tty);
1885 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1886 }
1887 #endif
1888
1889 return callee_method;
1890 }
1891
1892 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1893 // The faulting unsafe accesses should be changed to throw the error
1894 // synchronously instead. Meanwhile the faulting instruction will be
1895 // skipped over (effectively turning it into a no-op) and an
1896 // asynchronous exception will be raised which the thread will
1897 // handle at a later point. If the instruction is a load it will
1898 // return garbage.
1899
1900 // Request an async exception.
1901 thread->set_pending_unsafe_access_error();
1902
1903 // Return address of next instruction to execute.
1904 return next_pc;
1905 }
1906
1907 #ifdef ASSERT
1908 void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
1909 const BasicType* sig_bt,
1910 const VMRegPair* regs) {
1911 ResourceMark rm;
1912 const int total_args_passed = method->size_of_parameters();
1913 const VMRegPair* regs_with_member_name = regs;
1914 VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
1915
1916 const int member_arg_pos = total_args_passed - 1;
1917 assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1918 assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1919
1920 java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
1921
1922 for (int i = 0; i < member_arg_pos; i++) {
1923 VMReg a = regs_with_member_name[i].first();
1924 VMReg b = regs_without_member_name[i].first();
1925 assert(a->value() == b->value(), "register allocation mismatch: a= %d, b= %d", a->value(), b->value());
1926 }
1927 assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
1928 }
1929 #endif
1930
1931 // ---------------------------------------------------------------------------
1932 // We are calling the interpreter via a c2i. Normally this would mean that
1933 // we were called by a compiled method. However we could have lost a race
1934 // where we went int -> i2c -> c2i and so the caller could in fact be
1935 // interpreted. If the caller is compiled we attempt to patch the caller
1936 // so it no longer calls into the interpreter.
1937 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1938 AARCH64_PORT_ONLY(assert(pauth_ptr_is_raw(caller_pc), "should be raw"));
1939
1940 // It's possible that deoptimization can occur at a call site which hasn't
1941 // been resolved yet, in which case this function will be called from
1942 // an nmethod that has been patched for deopt and we can ignore the
1943 // request for a fixup.
1944 // Also it is possible that we lost a race and from_compiled_entry
1945 // is now back to the i2c stub. In that case we don't need to patch, and if
1946 // we did we'd leap into space because the callsite needs to use the
1947 // "to interpreter" stub in order to load up the Method*. Don't
1948 // ask me how I know this...
1949
1950 // Result from nmethod::is_unloading is not stable across safepoints.
1951 NoSafepointVerifier nsv;
1952
1953 nmethod* callee = method->code();
1954 if (callee == nullptr) {
1955 return;
1956 }
1957
1958 // write lock needed because we might patch call site by set_to_clean()
1959 // and is_unloading() can modify nmethod's state
1960 MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, JavaThread::current()));
1961
1962 CodeBlob* cb = CodeCache::find_blob(caller_pc);
1963 if (cb == nullptr || !cb->is_nmethod() || !callee->is_in_use() || callee->is_unloading()) {
1964 return;
1965 }
1966
1967 // The check above makes sure this is an nmethod.
1968 nmethod* caller = cb->as_nmethod();
1969
1970 // Get the return PC for the passed caller PC.
1971 address return_pc = caller_pc + frame::pc_return_offset;
1972
1973 if (!caller->is_in_use() || !NativeCall::is_call_before(return_pc)) {
1974 return;
1975 }
1976
1977 // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
1978 CompiledICLocker ic_locker(caller);
1979 ResourceMark rm;
1980
1981 // If we got here through a static call or opt_virtual call, then we know where the
1982 // call address would be; let's peek at it
1983 address callsite_addr = (address)nativeCall_before(return_pc);
1984 RelocIterator iter(caller, callsite_addr, callsite_addr + 1);
1985 if (!iter.next()) {
1986 // No reloc entry found; not a static or optimized virtual call
1987 return;
1988 }
1989
1990 relocInfo::relocType type = iter.reloc()->type();
1991 if (type != relocInfo::static_call_type &&
1992 type != relocInfo::opt_virtual_call_type) {
1993 return;
1994 }
1995
1996 CompiledDirectCall* callsite = CompiledDirectCall::before(return_pc);
1997 callsite->set_to_clean();
1998 JRT_END
1999
2000
2001 // same as JVM_Arraycopy, but called directly from compiled code
2002 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
2003 oopDesc* dest, jint dest_pos,
2004 jint length,
2005 JavaThread* current)) {
2006 #ifndef PRODUCT
2007 _slow_array_copy_ctr++;
2008 #endif
2009 // Check if we have null pointers
2010 if (src == nullptr || dest == nullptr) {
2011 THROW(vmSymbols::java_lang_NullPointerException());
2012 }
2013 // Do the copy. The casts to arrayOop are necessary for the copy_array API,
2014 // even though the copy_array API also performs dynamic checks to ensure
2015 // that src and dest are truly arrays (and are conformable).
2016 // The copy_array mechanism is awkward and could be removed, but
2017 // the compilers don't call this function except as a last resort,
2018 // so it probably doesn't matter.
2019 src->klass()->copy_array((arrayOopDesc*)src, src_pos,
2020 (arrayOopDesc*)dest, dest_pos,
2021 length, current);
2022 }
2023 JRT_END
2024
2025 // The caller of generate_class_cast_message() (or one of its callers)
2026 // must use a ResourceMark in order to correctly free the result.
2027 char* SharedRuntime::generate_class_cast_message(
2028 JavaThread* thread, Klass* caster_klass) {
2029
2030 // Get target class name from the checkcast instruction
2031 vframeStream vfst(thread, true);
2032 assert(!vfst.at_end(), "Java frame must exist");
2033 Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
2034 constantPoolHandle cpool(thread, vfst.method()->constants());
2035 Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index());
2036 Symbol* target_klass_name = nullptr;
2037 if (target_klass == nullptr) {
2038 // This klass should be resolved, but just in case, get the name in the klass slot.
2039 target_klass_name = cpool->klass_name_at(cc.index());
2040 }
2041 return generate_class_cast_message(caster_klass, target_klass, target_klass_name);
2042 }
2043
2044
2045 // The caller of generate_class_cast_message() (or one of its callers)
2046 // must use a ResourceMark in order to correctly free the result.
2047 char* SharedRuntime::generate_class_cast_message(
2048 Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) {
2049 const char* caster_name = caster_klass->external_name();
2050
2051 assert(target_klass != nullptr || target_klass_name != nullptr, "one must be provided");
2052 const char* target_name = target_klass == nullptr ? target_klass_name->as_klass_external_name() :
2053 target_klass->external_name();
2054
2055 size_t msglen = strlen(caster_name) + strlen("class ") + strlen(" cannot be cast to class ") + strlen(target_name) + 1;
2056
2057 const char* caster_klass_description = "";
2058 const char* target_klass_description = "";
2059 const char* klass_separator = "";
2060 if (target_klass != nullptr && caster_klass->module() == target_klass->module()) {
2061 caster_klass_description = caster_klass->joint_in_module_of_loader(target_klass);
2062 } else {
2063 caster_klass_description = caster_klass->class_in_module_of_loader();
2064 target_klass_description = (target_klass != nullptr) ? target_klass->class_in_module_of_loader() : "";
2065 klass_separator = (target_klass != nullptr) ? "; " : "";
2066 }
2067
2068 // add 3 for parenthesis and preceding space
2069 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
2070
2071 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2072 if (message == nullptr) {
2073 // Shouldn't happen, but don't cause even more problems if it does
2074 message = const_cast<char*>(caster_klass->external_name());
2075 } else {
2076 jio_snprintf(message,
2077 msglen,
2078 "class %s cannot be cast to class %s (%s%s%s)",
2079 caster_name,
2080 target_name,
2081 caster_klass_description,
2082 klass_separator,
2083 target_klass_description
2084 );
2085 }
2086 return message;
2087 }
2088
2089 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
2090 assert(klass->is_inline_klass(), "Must be a concrete value class");
2091 const char* desc = "Cannot synchronize on an instance of value class ";
2092 const char* className = klass->external_name();
2093 size_t msglen = strlen(desc) + strlen(className) + 1;
2094 char* message = NEW_RESOURCE_ARRAY(char, msglen);
2095 if (nullptr == message) {
2096 // Out of memory: can't create detailed error message
2097 message = const_cast<char*>(klass->external_name());
2098 } else {
2099 jio_snprintf(message, msglen, "%s%s", desc, className);
2100 }
2101 return message;
2102 }
2103
2104 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2105 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2106 JRT_END
2107
2108 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2109 if (!SafepointSynchronize::is_synchronizing()) {
2110 // Only try quick_enter() if we're not trying to reach a safepoint
2111 // so that the calling thread reaches the safepoint more quickly.
2112 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2113 return;
2114 }
2115 }
2116 // NO_ASYNC required because an async exception on the state transition destructor
2117 // would leave you with the lock held and it would never be released.
2118 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2119 // and the model is that an exception implies the method failed.
2120 JRT_BLOCK_NO_ASYNC
2121 Handle h_obj(THREAD, obj);
2122 ObjectSynchronizer::enter(h_obj, lock, current);
2123 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2124 JRT_BLOCK_END
2125 }
2126
2127 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
2128 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2129 SharedRuntime::monitor_enter_helper(obj, lock, current);
2130 JRT_END
2131
2132 void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2133 assert(JavaThread::current() == current, "invariant");
2134 // Exit must be non-blocking, and therefore no exceptions can be thrown.
2135 ExceptionMark em(current);
2136
2137 // Check if C2_MacroAssembler::fast_unlock() or
2138 // C2_MacroAssembler::fast_unlock_lightweight() unlocked an inflated
2139 // monitor before going slow path. Since there is no safepoint
2140 // polling when calling into the VM, we can be sure that the monitor
2141 // hasn't been deallocated.
2142 ObjectMonitor* m = current->unlocked_inflated_monitor();
2143 if (m != nullptr) {
2144 assert(!m->has_owner(current), "must be");
2145 current->clear_unlocked_inflated_monitor();
2146
2147 // We need to reacquire the lock before we can call ObjectSynchronizer::exit().
2148 if (!m->try_enter(current, /*check_for_recursion*/ false)) {
2149 // Some other thread acquired the lock (or the monitor was
2150 // deflated). Either way we are done.
2151 return;
2152 }
2153 }
2154
2155 // The object could become unlocked through a JNI call, which we have no other checks for.
2156 // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
2157 if (obj->is_unlocked()) {
2158 if (CheckJNICalls) {
2159 fatal("Object has been unlocked by JNI");
2160 }
2161 return;
2162 }
2163 ObjectSynchronizer::exit(obj, lock, current);
2164 }
2165
2166 // Handles the uncommon cases of monitor unlocking in compiled code
2167 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2168 assert(current == JavaThread::current(), "pre-condition");
2169 SharedRuntime::monitor_exit_helper(obj, lock, current);
2170 JRT_END
2171
2172 #ifndef PRODUCT
2173
2174 void SharedRuntime::print_statistics() {
2175 ttyLocker ttyl;
2176 if (xtty != nullptr) xtty->head("statistics type='SharedRuntime'");
2177
2178 SharedRuntime::print_ic_miss_histogram();
2179
2180 // Dump the JRT_ENTRY counters
2181 if (_new_instance_ctr) tty->print_cr("%5u new instance requires GC", _new_instance_ctr);
2182 if (_new_array_ctr) tty->print_cr("%5u new array requires GC", _new_array_ctr);
2183 if (_multi2_ctr) tty->print_cr("%5u multianewarray 2 dim", _multi2_ctr);
2184 if (_multi3_ctr) tty->print_cr("%5u multianewarray 3 dim", _multi3_ctr);
2185 if (_multi4_ctr) tty->print_cr("%5u multianewarray 4 dim", _multi4_ctr);
2186 if (_multi5_ctr) tty->print_cr("%5u multianewarray 5 dim", _multi5_ctr);
2187
2188 tty->print_cr("%5u inline cache miss in compiled", _ic_miss_ctr);
2189 tty->print_cr("%5u wrong method", _wrong_method_ctr);
2190 tty->print_cr("%5u unresolved static call site", _resolve_static_ctr);
2191 tty->print_cr("%5u unresolved virtual call site", _resolve_virtual_ctr);
2192 tty->print_cr("%5u unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2193
2194 if (_mon_enter_stub_ctr) tty->print_cr("%5u monitor enter stub", _mon_enter_stub_ctr);
2195 if (_mon_exit_stub_ctr) tty->print_cr("%5u monitor exit stub", _mon_exit_stub_ctr);
2196 if (_mon_enter_ctr) tty->print_cr("%5u monitor enter slow", _mon_enter_ctr);
2197 if (_mon_exit_ctr) tty->print_cr("%5u monitor exit slow", _mon_exit_ctr);
2198 if (_partial_subtype_ctr) tty->print_cr("%5u slow partial subtype", _partial_subtype_ctr);
2199 if (_jbyte_array_copy_ctr) tty->print_cr("%5u byte array copies", _jbyte_array_copy_ctr);
2200 if (_jshort_array_copy_ctr) tty->print_cr("%5u short array copies", _jshort_array_copy_ctr);
2201 if (_jint_array_copy_ctr) tty->print_cr("%5u int array copies", _jint_array_copy_ctr);
2202 if (_jlong_array_copy_ctr) tty->print_cr("%5u long array copies", _jlong_array_copy_ctr);
2203 if (_oop_array_copy_ctr) tty->print_cr("%5u oop array copies", _oop_array_copy_ctr);
2204 if (_checkcast_array_copy_ctr) tty->print_cr("%5u checkcast array copies", _checkcast_array_copy_ctr);
2205 if (_unsafe_array_copy_ctr) tty->print_cr("%5u unsafe array copies", _unsafe_array_copy_ctr);
2206 if (_generic_array_copy_ctr) tty->print_cr("%5u generic array copies", _generic_array_copy_ctr);
2207 if (_slow_array_copy_ctr) tty->print_cr("%5u slow array copies", _slow_array_copy_ctr);
2208 if (_find_handler_ctr) tty->print_cr("%5u find exception handler", _find_handler_ctr);
2209 if (_rethrow_ctr) tty->print_cr("%5u rethrow handler", _rethrow_ctr);
2210 if (_unsafe_set_memory_ctr) tty->print_cr("%5u unsafe set memorys", _unsafe_set_memory_ctr);
2211
2212 AdapterHandlerLibrary::print_statistics();
2213
2214 if (xtty != nullptr) xtty->tail("statistics");
2215 }
2216
2217 inline double percent(int64_t x, int64_t y) {
2218 return 100.0 * (double)x / (double)MAX2(y, (int64_t)1);
2219 }
2220
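// Collects histograms of call arity and argument block size over all nmethods,
// weighted by their compiled invocation counts, and prints them on construction.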
2221 class MethodArityHistogram {
2222 public:
2223 enum { MAX_ARITY = 256 };
2224 private:
2225 static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args
2226 static uint64_t _size_histogram[MAX_ARITY]; // histogram of arg size in words
2227 static uint64_t _total_compiled_calls;
2228 static uint64_t _max_compiled_calls_per_method;
2229 static int _max_arity; // max. arity seen
2230 static int _max_size; // max. arg size seen
2231
2232 static void add_method_to_histogram(nmethod* nm) {
2233 Method* method = (nm == nullptr) ? nullptr : nm->method();
2234 if (method != nullptr) {
2235 ArgumentCount args(method->signature());
2236 int arity = args.size() + (method->is_static() ? 0 : 1);
2237 int argsize = method->size_of_parameters();
2238 arity = MIN2(arity, MAX_ARITY-1);
2239 argsize = MIN2(argsize, MAX_ARITY-1);
2240 uint64_t count = (uint64_t)method->compiled_invocation_count();
2241 _max_compiled_calls_per_method = count > _max_compiled_calls_per_method ? count : _max_compiled_calls_per_method;
2242 _total_compiled_calls += count;
2243 _arity_histogram[arity] += count;
2244 _size_histogram[argsize] += count;
2245 _max_arity = MAX2(_max_arity, arity);
2246 _max_size = MAX2(_max_size, argsize);
2247 }
2248 }
2249
2250 void print_histogram_helper(int n, uint64_t* histo, const char* name) {
2251 const int N = MIN2(9, n);
2252 double sum = 0;
2253 double weighted_sum = 0;
2254 for (int i = 0; i <= n; i++) { sum += (double)histo[i]; weighted_sum += (double)(i*histo[i]); }
2255 if (sum >= 1) { // prevent divide by zero or divide overflow
2256 double rest = sum;
2257 double percent = sum / 100;
2258 for (int i = 0; i <= N; i++) {
2259 rest -= (double)histo[i];
2260 tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], (double)histo[i] / percent);
2261 }
2262 tty->print_cr("rest: " INT64_FORMAT_W(12) " (%5.1f%%)", (int64_t)rest, rest / percent);
2263 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2264 tty->print_cr("(total # of compiled calls = " INT64_FORMAT_W(14) ")", _total_compiled_calls);
2265 tty->print_cr("(max # of compiled calls = " INT64_FORMAT_W(14) ")", _max_compiled_calls_per_method);
2266 } else {
2267 tty->print_cr("Histogram generation failed for %s. n = %d, sum = %7.5f", name, n, sum);
2268 }
2269 }
2270
2271 void print_histogram() {
2272 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2273 print_histogram_helper(_max_arity, _arity_histogram, "arity");
2274 tty->print_cr("\nHistogram of parameter block size (in words, incl. rcvr):");
2275 print_histogram_helper(_max_size, _size_histogram, "size");
2276 tty->cr();
2277 }
2278
2279 public:
2280 MethodArityHistogram() {
2281 // Take the Compile_lock to protect against changes in the CodeBlob structures
2282 MutexLocker mu1(Compile_lock, Mutex::_safepoint_check_flag);
2283 // Take the CodeCache_lock to protect against changes in the CodeHeap structure
2284 MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2285 _max_arity = _max_size = 0;
2286 _total_compiled_calls = 0;
2287 _max_compiled_calls_per_method = 0;
2288 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2289 CodeCache::nmethods_do(add_method_to_histogram);
2290 print_histogram();
2291 }
2292 };
2293
2294 uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2295 uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2296 uint64_t MethodArityHistogram::_total_compiled_calls;
2297 uint64_t MethodArityHistogram::_max_compiled_calls_per_method;
2298 int MethodArityHistogram::_max_arity;
2299 int MethodArityHistogram::_max_size;
2300
2301 void SharedRuntime::print_call_statistics(uint64_t comp_total) {
2302 tty->print_cr("Calls from compiled code:");
2303 int64_t total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2304 int64_t mono_c = _nof_normal_calls - _nof_megamorphic_calls;
2305 int64_t mono_i = _nof_interface_calls;
2306 tty->print_cr("\t" INT64_FORMAT_W(12) " (100%%) total non-inlined ", total);
2307 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total));
2308 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2309 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_c, percent(mono_c, _nof_normal_calls));
2310 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2311 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total));
2312 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2313 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_i, percent(mono_i, _nof_interface_calls));
2314 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2315 tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2316 tty->cr();
2317 tty->print_cr("Note 1: counter updates are not MT-safe.");
2318 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2319 tty->print_cr(" %% in nested categories are relative to their category");
2320 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2321 tty->cr();
2322
2323 MethodArityHistogram h;
2324 }
2325 #endif
2326
2327 #ifndef PRODUCT
2328 static int _lookups; // number of calls to lookup
2329 static int _equals; // number of buckets checked with matching hash
2330 static int _archived_hits; // number of successful lookups in archived table
2331 static int _runtime_hits; // number of successful lookups in runtime table
2332 #endif
2333
2334 // A simple wrapper class around the calling convention information
2335 // that allows sharing of adapters for the same calling convention.
2336 class AdapterFingerPrint : public MetaspaceObj {
2337 public:
2338 class Element {
2339 private:
2340 // The highest byte is the type of the argument. The remaining bytes contain the offset of the
2341 // field if it is flattened in the calling convention, -1 otherwise.
2342 juint _payload;
2343
2344 static constexpr int offset_bit_width = 24;
2345 static constexpr juint offset_bit_mask = (1 << offset_bit_width) - 1;
2346 public:
2347 Element(BasicType bt, int offset) : _payload((static_cast<juint>(bt) << offset_bit_width) | (juint(offset) & offset_bit_mask)) {
2348 assert(offset >= -1 && offset < jint(offset_bit_mask), "invalid offset %d", offset);
2349 }
2350
2351 BasicType bt() const {
2352 return static_cast<BasicType>(_payload >> offset_bit_width);
2353 }
2354
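    // An all-ones offset field encodes -1, i.e. an argument that is not a flattened field.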
2355 int offset() const {
2356 juint res = _payload & offset_bit_mask;
2357 return res == offset_bit_mask ? -1 : res;
2358 }
2359
2360 juint hash() const {
2361 return _payload;
2362 }
2363
2364 bool operator!=(const Element& other) const {
2365 return _payload != other._payload;
2366 }
2367 };
2368
2369 private:
2370 const bool _has_ro_adapter;
2371 const int _length;
2372
2373 static int data_offset() { return sizeof(AdapterFingerPrint); }
2374 Element* data_pointer() {
2375 return reinterpret_cast<Element*>(reinterpret_cast<address>(this) + data_offset());
2376 }
2377
2378 const Element& element_at(int index) {
2379 assert(index < length(), "index %d out of bounds for length %d", index, length());
2380 Element* data = data_pointer();
2381 return data[index];
2382 }
2383
2384 // Private constructor. Use allocate() to get an instance.
2385 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter)
2386 : _has_ro_adapter(has_ro_adapter), _length(total_args_passed_in_sig(sig)) {
2387 Element* data = data_pointer();
2388 BasicType prev_bt = T_ILLEGAL;
2389 int vt_count = 0;
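    // Walk the signature: T_METADATA marks the start of a scalarized inline type and a
    // T_VOID that does not follow a long/double marks its end; only arguments outside of
    // scalarized inline types are widened via adapter_encoding().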
2390 for (int index = 0; index < _length; index++) {
2391 const SigEntry& sig_entry = sig->at(index);
2392 BasicType bt = sig_entry._bt;
2393 if (bt == T_METADATA) {
2394 // Found start of inline type in signature
2395 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2396 vt_count++;
2397 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2398 // Found end of inline type in signature
2399 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2400 vt_count--;
2401 assert(vt_count >= 0, "invalid vt_count");
2402 } else if (vt_count == 0) {
2403 // Widen fields that are not part of a scalarized inline type argument
2404 assert(sig_entry._offset == -1, "invalid offset for argument that is not a flattened field %d", sig_entry._offset);
2405 bt = adapter_encoding(bt);
2406 }
2407
2408 ::new(&data[index]) Element(bt, sig_entry._offset);
2409 prev_bt = bt;
2410 }
2411 assert(vt_count == 0, "invalid vt_count");
2412 }
2413
2414 // Call deallocate instead
2415 ~AdapterFingerPrint() {
2416 ShouldNotCallThis();
2417 }
2418
2419 static int total_args_passed_in_sig(const GrowableArray<SigEntry>* sig) {
2420 return (sig != nullptr) ? sig->length() : 0;
2421 }
2422
2423 static int compute_size_in_words(int len) {
2424 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(Element)));
2425 }
2426
2427 // Remap BasicTypes that are handled equivalently by the adapters.
2428 // These are correct for the current system but someday it might be
2429 // necessary to make this mapping platform dependent.
2430 static BasicType adapter_encoding(BasicType in) {
2431 switch (in) {
2432 case T_BOOLEAN:
2433 case T_BYTE:
2434 case T_SHORT:
2435 case T_CHAR:
2436 // They are all promoted to T_INT in the calling convention
2437 return T_INT;
2438
2439 case T_OBJECT:
2440 case T_ARRAY:
2441 // In other words, we assume that any register good enough for
2442 // an int or long is good enough for a managed pointer.
2443 #ifdef _LP64
2444 return T_LONG;
2445 #else
2446 return T_INT;
2447 #endif
2448
2449 case T_INT:
2450 case T_LONG:
2451 case T_FLOAT:
2452 case T_DOUBLE:
2453 case T_VOID:
2454 return in;
2455
2456 default:
2457 ShouldNotReachHere();
2458 return T_CONFLICT;
2459 }
2460 }
2461
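  // 'size' is sizeof(AdapterFingerPrint) as computed by the compiler; 'fp_size' also
  // covers the trailing array of Elements, so allocate and zero the full footprint.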
2462 void* operator new(size_t size, size_t fp_size) throw() {
2463 assert(fp_size >= size, "sanity check");
2464 void* p = AllocateHeap(fp_size, mtCode);
2465 memset(p, 0, fp_size);
2466 return p;
2467 }
2468
2469 public:
2470 template<typename Function>
2471 void iterate_args(Function function) {
2472 for (int i = 0; i < length(); i++) {
2473 function(element_at(i));
2474 }
2475 }
2476
2477 static AdapterFingerPrint* allocate(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2478 int len = total_args_passed_in_sig(sig);
2479 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2480 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(sig, has_ro_adapter);
2481 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2482 return afp;
2483 }
2484
2485 static void deallocate(AdapterFingerPrint* fp) {
2486 FreeHeap(fp);
2487 }
2488
2489 bool has_ro_adapter() const {
2490 return _has_ro_adapter;
2491 }
2492
2493 int length() const {
2494 return _length;
2495 }
2496
2497 unsigned int compute_hash() {
2498 int hash = 0;
2499 for (int i = 0; i < length(); i++) {
2500 const Element& v = element_at(i);
2501 // Add an arithmetic operation to the hash, like +3, to improve hashing
2502 hash = ((hash << 8) ^ v.hash() ^ (hash >> 5)) + 3;
2503 }
2504 return (unsigned int)hash;
2505 }
2506
2507 const char* as_string() {
2508 stringStream st;
2509 st.print("{");
2510 if (_has_ro_adapter) {
2511 st.print("has_ro_adapter");
2512 } else {
2513 st.print("no_ro_adapter");
2514 }
2515 for (int i = 0; i < length(); i++) {
2516 st.print(", ");
2517 const Element& elem = element_at(i);
2518 st.print("{%s, %d}", type2name(elem.bt()), elem.offset());
2519 }
2520 st.print("}");
2521 return st.as_string();
2522 }
2523
2524 const char* as_basic_args_string() {
2525 stringStream st;
2526 bool long_prev = false;
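    // On 64-bit, adapter_encoding() widens T_OBJECT/T_ARRAY to T_LONG. A real long shows
    // up as T_LONG followed by T_VOID, so defer printing until the next element is seen:
    // 'J' for a true long, 'L' for a widened reference.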
2527 iterate_args([&] (const Element& arg) {
2528 if (long_prev) {
2529 long_prev = false;
2530 if (arg.bt() == T_VOID) {
2531 st.print("J");
2532 } else {
2533 st.print("L");
2534 }
2535 }
2536 if (arg.bt() == T_LONG) {
2537 long_prev = true;
2538 } else if (arg.bt() != T_VOID) {
2539 st.print("%c", type2char(arg.bt()));
2540 }
2541 });
2542 if (long_prev) {
2543 st.print("L");
2544 }
2545 return st.as_string();
2546 }
2547
2548 bool equals(AdapterFingerPrint* other) {
2549 if (other->_has_ro_adapter != _has_ro_adapter) {
2550 return false;
2551 } else if (other->_length != _length) {
2552 return false;
2553 } else {
2554 for (int i = 0; i < _length; i++) {
2555 if (element_at(i) != other->element_at(i)) {
2556 return false;
2557 }
2558 }
2559 }
2560 return true;
2561 }
2562
2563 // methods required by virtue of being a MetaspaceObj
2564 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2565 int size() const { return compute_size_in_words(_length); }
2566 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2567
2568 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2569 NOT_PRODUCT(_equals++);
2570 return fp1->equals(fp2);
2571 }
2572
2573 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2574 return fp->compute_hash();
2575 }
2576 };
2577
2578 #if INCLUDE_CDS
2579 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2580 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2581 }
2582
2583 class ArchivedAdapterTable : public OffsetCompactHashtable<
2584 AdapterFingerPrint*,
2585 AdapterHandlerEntry*,
2586 adapter_fp_equals_compact_hashtable_entry> {};
2587 #endif // INCLUDE_CDS
2588
2589 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2590 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2591 AnyObj::C_HEAP, mtCode,
2592 AdapterFingerPrint::compute_hash,
2593 AdapterFingerPrint::equals>;
2594 static AdapterHandlerTable* _adapter_handler_table;
2595 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2596
2597 // Find an entry with the same fingerprint, if it exists
2598 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter) {
2599 NOT_PRODUCT(_lookups++);
2600 assert_lock_strong(AdapterHandlerLibrary_lock);
2601 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(sig, has_ro_adapter);
2602 AdapterHandlerEntry* entry = nullptr;
2603 #if INCLUDE_CDS
2604 // If we are building the archive then the archived adapter table is
2605 // not valid and we need to use the entries added to the runtime table.
2606 if (AOTCodeCache::is_using_adapter()) {
2607 // Search archived table first. It is read-only table so can be searched without lock
2608 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2609 #ifndef PRODUCT
2610 if (entry != nullptr) {
2611 _archived_hits++;
2612 }
2613 #endif
2614 }
2615 #endif // INCLUDE_CDS
2616 if (entry == nullptr) {
2617 assert_lock_strong(AdapterHandlerLibrary_lock);
2618 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2619 if (entry_p != nullptr) {
2620 entry = *entry_p;
2621 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2622 entry->fingerprint()->as_basic_args_string(), entry->fingerprint()->as_string(), entry->fingerprint()->compute_hash(),
2623 fp->as_basic_args_string(), fp->as_string(), fp->compute_hash());
2624 #ifndef PRODUCT
2625 _runtime_hits++;
2626 #endif
2627 }
2628 }
2629 AdapterFingerPrint::deallocate(fp);
2630 return entry;
2631 }
2632
2633 #ifndef PRODUCT
2634 static void print_table_statistics() {
2635 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2636 return sizeof(*key) + sizeof(*a);
2637 };
2638 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2639 ts.print(tty, "AdapterHandlerTable");
2640 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2641 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2642 int total_hits = _archived_hits + _runtime_hits;
2643 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2644 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2645 }
2646 #endif
2647
2648 // ---------------------------------------------------------------------------
2649 // Implementation of AdapterHandlerLibrary
2650 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2651 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2652 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2653 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2654 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2655 #if INCLUDE_CDS
2656 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2657 #endif // INCLUDE_CDS
2658 static const int AdapterHandlerLibrary_size = 48*K;
2659 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2660 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2661
2662 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2663 assert(_buffer != nullptr, "should be initialized");
2664 return _buffer;
2665 }
2666
2667 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2668 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2669 AdapterBlob* adapter_blob = entry->adapter_blob();
2670 char blob_id[256];
2671 jio_snprintf(blob_id,
2672 sizeof(blob_id),
2673 "%s(%s)",
2674 adapter_blob->name(),
2675 entry->fingerprint()->as_string());
2676 if (Forte::is_enabled()) {
2677 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2678 }
2679
2680 if (JvmtiExport::should_post_dynamic_code_generated()) {
2681 JvmtiExport::post_dynamic_code_generated(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2682 }
2683 }
2684 }
2685
2686 void AdapterHandlerLibrary::initialize() {
2687 {
2688 ResourceMark rm;
2689 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2690 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2691 }
2692
2693 #if INCLUDE_CDS
2694 // Link adapters in AOT Cache to their code in AOT Code Cache
2695 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2696 link_aot_adapters();
2697 lookup_simple_adapters();
2698 return;
2699 }
2700 #endif // INCLUDE_CDS
2701
2702 ResourceMark rm;
2703 {
2704 MutexLocker mu(AdapterHandlerLibrary_lock);
2705
2706 CompiledEntrySignature no_args;
2707 no_args.compute_calling_conventions();
2708 _no_arg_handler = create_adapter(no_args, true);
2709
2710 CompiledEntrySignature obj_args;
2711 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
2712 obj_args.compute_calling_conventions();
2713 _obj_arg_handler = create_adapter(obj_args, true);
2714
2715 CompiledEntrySignature int_args;
2716 SigEntry::add_entry(int_args.sig(), T_INT);
2717 int_args.compute_calling_conventions();
2718 _int_arg_handler = create_adapter(int_args, true);
2719
2720 CompiledEntrySignature obj_int_args;
2721 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
2722 SigEntry::add_entry(obj_int_args.sig(), T_INT);
2723 obj_int_args.compute_calling_conventions();
2724 _obj_int_arg_handler = create_adapter(obj_int_args, true);
2725
2726 CompiledEntrySignature obj_obj_args;
2727 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2728 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2729 obj_obj_args.compute_calling_conventions();
2730 _obj_obj_arg_handler = create_adapter(obj_obj_args, true);
2731
2732 // we should always get an entry back but we don't have any
2733 // associated blob on Zero
2734 assert(_no_arg_handler != nullptr &&
2735 _obj_arg_handler != nullptr &&
2736 _int_arg_handler != nullptr &&
2737 _obj_int_arg_handler != nullptr &&
2738 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2739 }
2740
2741 // Outside of the lock
2742 #ifndef ZERO
2743 // no blobs to register when we are on Zero
2744 post_adapter_creation(_no_arg_handler);
2745 post_adapter_creation(_obj_arg_handler);
2746 post_adapter_creation(_int_arg_handler);
2747 post_adapter_creation(_obj_int_arg_handler);
2748 post_adapter_creation(_obj_obj_arg_handler);
2749 #endif // ZERO
2750 }
2751
2752 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2753 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2754 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2755 return AdapterHandlerEntry::allocate(id, fingerprint);
2756 }
2757
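// Return one of the pre-created adapters for methods whose signature maps onto the
// no-arg, single int, single object, object+int or object+object shapes. Returns
// nullptr when the method needs a custom adapter, e.g. when an inline type argument
// could be passed in scalarized form (InlineTypePassFieldsAsArgs).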
2758 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2759 int total_args_passed = method->size_of_parameters(); // All args on stack
2760 if (total_args_passed == 0) {
2761 return _no_arg_handler;
2762 } else if (total_args_passed == 1) {
2763 if (!method->is_static()) {
2764 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2765 return nullptr;
2766 }
2767 return _obj_arg_handler;
2768 }
2769 switch (method->signature()->char_at(1)) {
2770 case JVM_SIGNATURE_CLASS: {
2771 if (InlineTypePassFieldsAsArgs) {
2772 SignatureStream ss(method->signature());
2773 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2774 if (vk != nullptr) {
2775 return nullptr;
2776 }
2777 }
2778 return _obj_arg_handler;
2779 }
2780 case JVM_SIGNATURE_ARRAY:
2781 return _obj_arg_handler;
2782 case JVM_SIGNATURE_INT:
2783 case JVM_SIGNATURE_BOOLEAN:
2784 case JVM_SIGNATURE_CHAR:
2785 case JVM_SIGNATURE_BYTE:
2786 case JVM_SIGNATURE_SHORT:
2787 return _int_arg_handler;
2788 }
2789 } else if (total_args_passed == 2 &&
2790 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2791 switch (method->signature()->char_at(1)) {
2792 case JVM_SIGNATURE_CLASS: {
2793 if (InlineTypePassFieldsAsArgs) {
2794 SignatureStream ss(method->signature());
2795 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2796 if (vk != nullptr) {
2797 return nullptr;
2798 }
2799 }
2800 return _obj_obj_arg_handler;
2801 }
2802 case JVM_SIGNATURE_ARRAY:
2803 return _obj_obj_arg_handler;
2804 case JVM_SIGNATURE_INT:
2805 case JVM_SIGNATURE_BOOLEAN:
2806 case JVM_SIGNATURE_CHAR:
2807 case JVM_SIGNATURE_BYTE:
2808 case JVM_SIGNATURE_SHORT:
2809 return _obj_int_arg_handler;
2810 }
2811 }
2812 return nullptr;
2813 }
2814
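// The signature arrays below are resource allocated and pre-sized to the number
// of method parameters (or 1 when no method is given).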
2815 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2816 _method(method), _num_inline_args(0), _has_inline_recv(false),
2817 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2818 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2819 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2820 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2821 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2822 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2823 }
2824
2825 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2826 // or the same entry for VEP and VIEP(RO).
2827 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2828 if (!has_scalarized_args()) {
2829 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2830 return CodeOffsets::Verified_Entry;
2831 }
2832 if (_method->is_static()) {
2833 // Static methods don't need VIEP(RO)
2834 return CodeOffsets::Verified_Entry;
2835 }
2836
2837 if (has_inline_recv()) {
2838 if (num_inline_args() == 1) {
2839 // Share same entry for VIEP and VIEP(RO).
2840 // This is quite common: we have an instance method in an InlineKlass that has
2841 // no inline type args other than <this>.
2842 return CodeOffsets::Verified_Inline_Entry;
2843 } else {
2844 assert(num_inline_args() > 1, "must be");
2845 // No sharing:
2846 // VIEP(RO) -- <this> is passed as object
2847 // VEP -- <this> is passed as fields
2848 return CodeOffsets::Verified_Inline_Entry_RO;
2849 }
2850 }
2851
2852 // Either a static method, or <this> is not an inline type
2853 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2854 // No sharing:
2855 // Some arguments are passed on the stack, and we have inserted reserved entries
2856 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2857 return CodeOffsets::Verified_Inline_Entry_RO;
2858 } else {
2859 // Share same entry for VEP and VIEP(RO).
2860 return CodeOffsets::Verified_Entry;
2861 }
2862 }
2863
2864 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2865 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2866 if (_supers != nullptr) {
2867 return _supers;
2868 }
2869 _supers = new GrowableArray<Method*>();
2870 // Skip private, static, and <init> methods
2871 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2872 return _supers;
2873 }
2874 Symbol* name = _method->name();
2875 Symbol* signature = _method->signature();
2876 const Klass* holder = _method->method_holder()->super();
2877 Symbol* holder_name = holder->name();
2878 ThreadInVMfromUnknown tiv;
2879 JavaThread* current = JavaThread::current();
2880 HandleMark hm(current);
2881 Handle loader(current, _method->method_holder()->class_loader());
2882
2883 // Walk up the class hierarchy and search for super methods
2884 while (holder != nullptr) {
2885 Method* super_method = holder->lookup_method(name, signature);
2886 if (super_method == nullptr) {
2887 break;
2888 }
2889 if (!super_method->is_static() && !super_method->is_private() &&
2890 (!super_method->is_package_private() ||
2891 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2892 _supers->push(super_method);
2893 }
2894 holder = super_method->method_holder()->super();
2895 }
2896 // Search interfaces for super methods
2897 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2898 for (int i = 0; i < interfaces->length(); ++i) {
2899 Method* m = interfaces->at(i)->lookup_method(name, signature);
2900 if (m != nullptr && !m->is_static() && m->is_public()) {
2901 _supers->push(m);
2902 }
2903 }
2904 return _supers;
2905 }
2906
2907 // Iterate over arguments and compute scalarized and non-scalarized signatures
2908 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2909 bool has_scalarized = false;
2910 if (_method != nullptr) {
2911 InstanceKlass* holder = _method->method_holder();
2912 int arg_num = 0;
2913 if (!_method->is_static()) {
2914 // We shouldn't scalarize 'this' in a value class constructor
2915 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() && !_method->is_object_constructor() &&
2916 (init || _method->is_scalarized_arg(arg_num))) {
2917 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2918 has_scalarized = true;
2919 _has_inline_recv = true;
2920 _num_inline_args++;
2921 } else {
2922 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2923 }
2924 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2925 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2926 arg_num++;
2927 }
2928 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2929 BasicType bt = ss.type();
2930 if (bt == T_OBJECT) {
2931 InlineKlass* vk = ss.as_inline_klass(holder);
2932 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2933 // Check for a calling convention mismatch with super method(s)
2934 bool scalar_super = false;
2935 bool non_scalar_super = false;
2936 GrowableArray<Method*>* supers = get_supers();
2937 for (int i = 0; i < supers->length(); ++i) {
2938 Method* super_method = supers->at(i);
2939 if (super_method->is_scalarized_arg(arg_num)) {
2940 scalar_super = true;
2941 } else {
2942 non_scalar_super = true;
2943 }
2944 }
2945 #ifdef ASSERT
2946 // Randomly enable the code paths below for stress testing
2947 bool stress = init && StressCallingConvention;
2948 if (stress && (os::random() & 1) == 1) {
2949 non_scalar_super = true;
2950 if ((os::random() & 1) == 1) {
2951 scalar_super = true;
2952 }
2953 }
2954 #endif
2955 if (non_scalar_super) {
2956 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2957 if (scalar_super) {
2958 // Found non-scalar *and* scalar super methods. We can't handle both.
2959 // Mark the scalarized super methods as mismatched and recompile call sites to use the non-scalarized calling convention.
2960 for (int i = 0; i < supers->length(); ++i) {
2961 Method* super_method = supers->at(i);
2962 if (super_method->is_scalarized_arg(arg_num) DEBUG_ONLY(|| (stress && (os::random() & 1) == 1))) {
2963 super_method->set_mismatch();
2964 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2965 JavaThread* thread = JavaThread::current();
2966 HandleMark hm(thread);
2967 methodHandle mh(thread, super_method);
2968 DeoptimizationScope deopt_scope;
2969 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2970 deopt_scope.deoptimize_marked();
2971 }
2972 }
2973 }
2974 // Fall back to non-scalarized calling convention
2975 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2976 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2977 } else {
2978 _num_inline_args++;
2979 has_scalarized = true;
2980 int last = _sig_cc->length();
2981 int last_ro = _sig_cc_ro->length();
2982 _sig_cc->appendAll(vk->extended_sig());
2983 _sig_cc_ro->appendAll(vk->extended_sig());
2984 if (bt == T_OBJECT) {
2985 // Nullable inline type argument, insert InlineTypeNode::NullMarker field right after T_METADATA delimiter
2986 _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, nullptr, true));
2987 _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, nullptr, true));
2988 }
2989 }
2990 } else {
2991 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2992 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2993 }
2994 bt = T_OBJECT;
2995 } else {
2996 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2997 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2998 }
2999 SigEntry::add_entry(_sig, bt, ss.as_symbol());
3000 if (bt != T_VOID) {
3001 arg_num++;
3002 }
3003 }
3004 }
3005
3006 // Compute the non-scalarized calling convention
3007 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3008 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3009
3010 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3011 if (has_scalarized && !_method->is_native()) {
3012 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3013 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3014
3015 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3016 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3017
3018 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3019 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3020
3021 // Upper bound on stack arguments to avoid hitting the argument limit and
3022 // bailing out of compilation ("unsupported incoming calling sequence").
3023 // TODO we need a reasonable limit (flag?) here
3024 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
3025 return; // Success
3026 }
3027 }
3028
3029 // No scalarized args
3030 _sig_cc = _sig;
3031 _regs_cc = _regs;
3032 _args_on_stack_cc = _args_on_stack;
3033
3034 _sig_cc_ro = _sig;
3035 _regs_cc_ro = _regs;
3036 _args_on_stack_cc_ro = _args_on_stack;
3037 }
3038
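// Reconstruct the non-scalarized (_sig) and scalarized (_sig_cc/_sig_cc_ro)
// signatures and their calling conventions from an adapter fingerprint. This is
// used when only the fingerprint is available, e.g. when linking adapters restored
// from the AOT cache or when verifying fingerprints in debug builds.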
3039 void CompiledEntrySignature::initialize_from_fingerprint(AdapterFingerPrint* fingerprint) {
3040 _has_inline_recv = fingerprint->has_ro_adapter();
3041
3042 int value_object_count = 0;
3043 BasicType prev_bt = T_ILLEGAL;
3044 bool has_scalarized_arguments = false;
3045 bool long_prev = false;
3046 int long_prev_offset = -1;
3047
3048 fingerprint->iterate_args([&] (const AdapterFingerPrint::Element& arg) {
3049 BasicType bt = arg.bt();
3050 int offset = arg.offset();
3051
3052 if (long_prev) {
3053 long_prev = false;
3054 BasicType bt_to_add;
3055 if (bt == T_VOID) {
3056 bt_to_add = T_LONG;
3057 } else {
3058 bt_to_add = T_OBJECT;
3059 }
3060 if (value_object_count == 0) {
3061 SigEntry::add_entry(_sig, bt_to_add);
3062 }
3063 SigEntry::add_entry(_sig_cc, bt_to_add, nullptr, long_prev_offset);
3064 SigEntry::add_entry(_sig_cc_ro, bt_to_add, nullptr, long_prev_offset);
3065 }
3066
3067 switch (bt) {
3068 case T_VOID:
3069 if (prev_bt != T_LONG && prev_bt != T_DOUBLE) {
3070 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
3071 value_object_count--;
3072 SigEntry::add_entry(_sig_cc, T_VOID, nullptr, offset);
3073 SigEntry::add_entry(_sig_cc_ro, T_VOID, nullptr, offset);
3074 assert(value_object_count >= 0, "invalid value object count");
3075 } else {
3076 // Nothing to add for _sig: we already added an additional T_VOID in add_entry() when adding T_LONG or T_DOUBLE.
3077 }
3078 break;
3079 case T_INT:
3080 case T_FLOAT:
3081 case T_DOUBLE:
3082 if (value_object_count == 0) {
3083 SigEntry::add_entry(_sig, bt);
3084 }
3085 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3086 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3087 break;
3088 case T_LONG:
3089 long_prev = true;
3090 long_prev_offset = offset;
3091 break;
3092 case T_BOOLEAN:
3093 case T_CHAR:
3094 case T_BYTE:
3095 case T_SHORT:
3096 case T_OBJECT:
3097 case T_ARRAY:
3098 assert(value_object_count > 0, "must be value object field");
3099 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3100 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3101 break;
3102 case T_METADATA:
3103 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
3104 if (value_object_count == 0) {
3105 SigEntry::add_entry(_sig, T_OBJECT);
3106 }
3107 SigEntry::add_entry(_sig_cc, T_METADATA, nullptr, offset);
3108 SigEntry::add_entry(_sig_cc_ro, T_METADATA, nullptr, offset);
3109 value_object_count++;
3110 has_scalarized_arguments = true;
3111 break;
3112 default: {
3113 fatal("Unexpected BasicType: %s", basictype_to_str(bt));
3114 }
3115 }
3116 prev_bt = bt;
3117 });
3118
3119 if (long_prev) {
3120 // If previous bt was T_LONG and we reached the end of the signature, we know that it must be a T_OBJECT.
3121 SigEntry::add_entry(_sig, T_OBJECT);
3122 SigEntry::add_entry(_sig_cc, T_OBJECT);
3123 SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3124 }
3125 assert(value_object_count == 0, "invalid value object count");
3126
3127 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3128 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3129
3130 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3131 if (has_scalarized_arguments) {
3132 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3133 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3134
3135 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3136 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3137
3138 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3139 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3140 } else {
3141 // No scalarized args
3142 _sig_cc = _sig;
3143 _regs_cc = _regs;
3144 _args_on_stack_cc = _args_on_stack;
3145
3146 _sig_cc_ro = _sig;
3147 _regs_cc_ro = _regs;
3148 _args_on_stack_cc_ro = _args_on_stack;
3149 }
3150
3151 #ifdef ASSERT
3152 {
3153 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(_sig_cc, _has_inline_recv);
3154 assert(fingerprint->equals(compare_fp), "%s - %s", fingerprint->as_string(), compare_fp->as_string());
3155 AdapterFingerPrint::deallocate(compare_fp);
3156 }
3157 #endif
3158 }
3159
3160 const char* AdapterHandlerEntry::_entry_names[] = {
3161 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
3162 };
3163
3164 #ifdef ASSERT
3165 void AdapterHandlerLibrary::verify_adapter_sharing(CompiledEntrySignature& ces, AdapterHandlerEntry* cached_entry) {
3166 // We can only compare the saved code if any was generated (there is none on Zero)
3167 #ifndef ZERO
3168 AdapterHandlerEntry* comparison_entry = create_adapter(ces, false, true);
3169 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
3170 assert(comparison_entry->compare_code(cached_entry), "code must match");
3171 // Release the one just created
3172 AdapterHandlerEntry::deallocate(comparison_entry);
3173 #endif // ZERO
3174 }
3175 #endif /* ASSERT */
3176
3177 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3178 assert(!method->is_abstract(), "abstract methods do not have adapters");
3179 // Use customized signature handler. Need to lock around updates to
3180 // the _adapter_handler_table (it is not safe for concurrent readers
3181 // and a single writer: this could be fixed if it becomes a
3182 // problem).
3183
3184 // Fast-path for trivial adapters
3185 AdapterHandlerEntry* entry = get_simple_adapter(method);
3186 if (entry != nullptr) {
3187 return entry;
3188 }
3189
3190 ResourceMark rm;
3191 bool new_entry = false;
3192
3193 CompiledEntrySignature ces(method());
3194 ces.compute_calling_conventions();
3195 if (ces.has_scalarized_args()) {
3196 if (!method->has_scalarized_args()) {
3197 method->set_has_scalarized_args();
3198 }
3199 if (ces.c1_needs_stack_repair()) {
3200 method->set_c1_needs_stack_repair();
3201 }
3202 if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
3203 method->set_c2_needs_stack_repair();
3204 }
3205 }
3206
3207 {
3208 MutexLocker mu(AdapterHandlerLibrary_lock);
3209
3210 // Lookup method signature's fingerprint
3211 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3212
3213 if (entry != nullptr) {
3214 #ifndef ZERO
3215 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
3216 #endif
3217 #ifdef ASSERT
3218 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
3219 verify_adapter_sharing(ces, entry);
3220 }
3221 #endif
3222 } else {
3223 entry = create_adapter(ces, /* allocate_code_blob */ true);
3224 if (entry != nullptr) {
3225 new_entry = true;
3226 }
3227 }
3228 }
3229
3230 // Outside of the lock
3231 if (new_entry) {
3232 post_adapter_creation(entry);
3233 }
3234 return entry;
3235 }
3236
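// Try to find the adapter's code blob in the AOT code cache, keyed by adapter id
// and name, and attach it to the handler if present.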
3237 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
3238 ResourceMark rm;
3239 const char* name = AdapterHandlerLibrary::name(handler);
3240 const uint32_t id = AdapterHandlerLibrary::id(handler);
3241
3242 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
3243 if (blob != nullptr) {
3244 handler->set_adapter_blob(blob->as_adapter_blob());
3245 }
3246 }
3247
3248 #ifndef PRODUCT
3249 void AdapterHandlerLibrary::print_adapter_handler_info(outputStream* st, AdapterHandlerEntry* handler) {
3250 ttyLocker ttyl;
3251 ResourceMark rm;
3252 int insts_size;
3253 // On Zero the blob may be null
3254 handler->print_adapter_on(tty);
3255 AdapterBlob* adapter_blob = handler->adapter_blob();
3256 if (adapter_blob == nullptr) {
3257 return;
3258 }
3259 insts_size = adapter_blob->code_size();
3260 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
3261 handler->fingerprint()->as_basic_args_string(),
3262 handler->fingerprint()->as_string(), insts_size);
3263 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
3264 if (Verbose || PrintStubCode) {
3265 address first_pc = adapter_blob->content_begin();
3266 if (first_pc != nullptr) {
3267 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
3268 st->cr();
3269 }
3270 }
3271 }
3272 #endif // PRODUCT
3273
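// Convert the absolute entry addresses produced by the adapter generator into
// offsets relative to the i2c entry; a missing c2i no-clinit-check entry is
// recorded as -1.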
3274 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
3275 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
3276 entry_offset[AdapterBlob::I2C] = 0;
3277 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
3278 entry_offset[AdapterBlob::C2I_Inline] = entry_address[AdapterBlob::C2I_Inline] - entry_address[AdapterBlob::I2C];
3279 entry_offset[AdapterBlob::C2I_Inline_RO] = entry_address[AdapterBlob::C2I_Inline_RO] - entry_address[AdapterBlob::I2C];
3280 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
3281 entry_offset[AdapterBlob::C2I_Unverified_Inline] = entry_address[AdapterBlob::C2I_Unverified_Inline] - entry_address[AdapterBlob::I2C];
3282 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
3283 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
3284 } else {
3285 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
3286 }
3287 }
3288
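// Generate the i2c/c2i adapter code for the given signature into the shared
// temporary buffer, optionally allocating an AdapterBlob in the code cache and,
// when dumping, storing it in the AOT code cache. Returns false if the code
// cache is full.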
3289 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
3290 CompiledEntrySignature& ces,
3291 bool allocate_code_blob,
3292 bool is_transient) {
3293 if (log_is_enabled(Info, perf, class, link)) {
3294 ClassLoader::perf_method_adapters_count()->inc();
3295 }
3296
3297 #ifndef ZERO
3298 AdapterBlob* adapter_blob = nullptr;
3299 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3300 CodeBuffer buffer(buf);
3301 short buffer_locs[20];
3302 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3303 sizeof(buffer_locs)/sizeof(relocInfo));
3304 MacroAssembler masm(&buffer);
3305 address entry_address[AdapterBlob::ENTRY_COUNT];
3306
3307 // Get a description of the compiled Java calling convention and the largest used (VMReg) stack slot
3308 SharedRuntime::generate_i2c2i_adapters(&masm,
3309 ces.args_on_stack(),
3310 ces.sig(),
3311 ces.regs(),
3312 ces.sig_cc(),
3313 ces.regs_cc(),
3314 ces.sig_cc_ro(),
3315 ces.regs_cc_ro(),
3316 entry_address,
3317 adapter_blob,
3318 allocate_code_blob);
3319
3320 if (ces.has_scalarized_args()) {
3321 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3322 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3323 heap_sig->appendAll(ces.sig_cc());
3324 handler->set_sig_cc(heap_sig);
3325 }
3326 // On Zero there is no code to save and no need to create a blob
3327 // or relocate the handler.
3328 int entry_offset[AdapterBlob::ENTRY_COUNT];
3329 address_to_offset(entry_address, entry_offset);
3330 #ifdef ASSERT
3331 if (VerifyAdapterSharing) {
3332 handler->save_code(buf->code_begin(), buffer.insts_size());
3333 if (is_transient) {
3334 return true;
3335 }
3336 }
3337 #endif
3338 if (adapter_blob == nullptr) {
3339 // CodeCache is full, disable compilation
3340 // Ought to log this but compile log is only per compile thread
3341 // and we're some nondescript Java thread.
3342 return false;
3343 }
3344 handler->set_adapter_blob(adapter_blob);
3345 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
3346 // try to save generated code
3347 const char* name = AdapterHandlerLibrary::name(handler);
3348 const uint32_t id = AdapterHandlerLibrary::id(handler);
3349 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
3350 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
3351 }
3352 #endif // ZERO
3353
3354 #ifndef PRODUCT
3355 // debugging support
3356 if (PrintAdapterHandlers || PrintStubCode) {
3357 print_adapter_handler_info(tty, handler);
3358 }
3359 #endif
3360
3361 return true;
3362 }
3363
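// Create a new AdapterHandlerEntry for the given signature, generate its adapter
// code and, unless the entry is transient, register it in the runtime adapter table.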
3364 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(CompiledEntrySignature& ces,
3365 bool allocate_code_blob,
3366 bool is_transient) {
3367 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(ces.sig_cc(), ces.has_inline_recv());
3368 #ifdef ASSERT
3369 // Verify that we can successfully restore the compiled entry signature object.
3370 CompiledEntrySignature ces_verify;
3371 ces_verify.initialize_from_fingerprint(fp);
3372 #endif
3373 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
3374 if (!generate_adapter_code(handler, ces, allocate_code_blob, is_transient)) {
3375 AdapterHandlerEntry::deallocate(handler);
3376 return nullptr;
3377 }
3378 if (!is_transient) {
3379 assert_lock_strong(AdapterHandlerLibrary_lock);
3380 _adapter_handler_table->put(fp, handler);
3381 }
3382 return handler;
3383 }
3384
3385 #if INCLUDE_CDS
3386 void AdapterHandlerEntry::remove_unshareable_info() {
3387 #ifdef ASSERT
3388 _saved_code = nullptr;
3389 _saved_code_length = 0;
3390 #endif // ASSERT
3391 _adapter_blob = nullptr;
3392 _linked = false;
3393 }
3394
3395 class CopyAdapterTableToArchive : StackObj {
3396 private:
3397 CompactHashtableWriter* _writer;
3398 ArchiveBuilder* _builder;
3399 public:
3400 CopyAdapterTableToArchive(CompactHashtableWriter* writer) : _writer(writer),
3401 _builder(ArchiveBuilder::current())
3402 {}
3403
3404 bool do_entry(AdapterFingerPrint* fp, AdapterHandlerEntry* entry) {
3405 LogStreamHandle(Trace, aot) lsh;
3406 if (ArchiveBuilder::current()->has_been_archived((address)entry)) {
3407 assert(ArchiveBuilder::current()->has_been_archived((address)fp), "must be");
3408 AdapterFingerPrint* buffered_fp = ArchiveBuilder::current()->get_buffered_addr(fp);
3409 assert(buffered_fp != nullptr,"sanity check");
3410 AdapterHandlerEntry* buffered_entry = ArchiveBuilder::current()->get_buffered_addr(entry);
3411 assert(buffered_entry != nullptr,"sanity check");
3412
3413 uint hash = fp->compute_hash();
3414 u4 delta = _builder->buffer_to_offset_u4((address)buffered_entry);
3415 _writer->add(hash, delta);
3416 if (lsh.is_enabled()) {
3417 address fp_runtime_addr = (address)buffered_fp + ArchiveBuilder::current()->buffer_to_requested_delta();
3418 address entry_runtime_addr = (address)buffered_entry + ArchiveBuilder::current()->buffer_to_requested_delta();
3419 log_trace(aot)("Added fp=%p (%s), entry=%p to the archived adater table", buffered_fp, buffered_fp->as_basic_args_string(), buffered_entry);
3420 }
3421 } else {
3422 if (lsh.is_enabled()) {
3423 log_trace(aot)("Skipping adapter handler %p (fp=%s) as it is not archived", entry, fp->as_basic_args_string());
3424 }
3425 }
3426 return true;
3427 }
3428 };
3429
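// Write the runtime adapter table into the AOT cache as a compact hashtable,
// skipping any entries that have not been archived.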
3430 void AdapterHandlerLibrary::dump_aot_adapter_table() {
3431 CompactHashtableStats stats;
3432 CompactHashtableWriter writer(_adapter_handler_table->number_of_entries(), &stats);
3433 CopyAdapterTableToArchive copy(&writer);
3434 _adapter_handler_table->iterate(&copy);
3435 writer.dump(&_aot_adapter_handler_table, "archived adapter table");
3436 }
3437
3438 void AdapterHandlerLibrary::serialize_shared_table_header(SerializeClosure* soc) {
3439 _aot_adapter_handler_table.serialize_header(soc);
3440 }
3441
3442 void AdapterHandlerLibrary::link_aot_adapter_handler(AdapterHandlerEntry* handler) {
3443 #ifdef ASSERT
3444 if (TestAOTAdapterLinkFailure) {
3445 return;
3446 }
3447 #endif
3448 lookup_aot_cache(handler);
3449 #ifndef PRODUCT
3450 // debugging support
3451 if (PrintAdapterHandlers || PrintStubCode) {
3452 print_adapter_handler_info(tty, handler);
3453 }
3454 #endif
3455 }
3456
3457 // This method is used during a production run to link archived adapters (stored in the AOT Cache)
3458 // to their code in the AOT Code Cache
3459 void AdapterHandlerEntry::link() {
3460 ResourceMark rm;
3461 assert(_fingerprint != nullptr, "_fingerprint must not be null");
3462 bool generate_code = false;
3463 // Generate code only if AOTCodeCache is not available, or
3464 // caching adapters is disabled, or we fail to link
3465 // the AdapterHandlerEntry to its code in the AOTCodeCache
3466 if (AOTCodeCache::is_using_adapter()) {
3467 AdapterHandlerLibrary::link_aot_adapter_handler(this);
3468 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3469 if (_adapter_blob == nullptr) {
3470 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3471 generate_code = true;
3472 }
3473 } else {
3474 generate_code = true;
3475 }
3476 if (generate_code) {
3477 CompiledEntrySignature ces;
3478 ces.initialize_from_fingerprint(_fingerprint);
3479 if (!AdapterHandlerLibrary::generate_adapter_code(this, ces, true, false)) {
3480 // Don't throw exceptions during VM initialization because java.lang.* classes
3481 // might not have been initialized, causing problems when constructing the
3482 // Java exception object.
3483 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3484 }
3485 }
3486 if (_adapter_blob != nullptr) {
3487 post_adapter_creation(this);
3488 }
3489 assert(_linked, "AdapterHandlerEntry must now be linked");
3490 }
3491
3492 void AdapterHandlerLibrary::link_aot_adapters() {
3493 uint max_id = 0;
3494 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3495 /* It is possible that some adapters generated in the assembly phase are not stored in the cache.
3496 * That implies that the adapter ids of the adapters in the cache may not be contiguous.
3497 * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
3498 * result in collisions of adapter ids between AOT-stored handlers and runtime-generated handlers.
3499 * To avoid such a situation, initialize _id_counter with the largest adapter id among the AOT-stored handlers.
3500 */
3501 _aot_adapter_handler_table.iterate([&](AdapterHandlerEntry* entry) {
3502 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3503 entry->link();
3504 max_id = MAX2(max_id, entry->id());
3505 });
3506 // Set adapter id to the maximum id found in the AOTCache
3507 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3508 _id_counter = max_id;
3509 }
3510
3511 // This method is called during a production run to look up simple adapters
3512 // in the archived adapter handler table
3513 void AdapterHandlerLibrary::lookup_simple_adapters() {
3514 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3515
3516 MutexLocker mu(AdapterHandlerLibrary_lock);
3517 ResourceMark rm;
3518 CompiledEntrySignature no_args;
3519 no_args.compute_calling_conventions();
3520 _no_arg_handler = lookup(no_args.sig_cc(), no_args.has_inline_recv());
3521
3522 CompiledEntrySignature obj_args;
3523 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
3524 obj_args.compute_calling_conventions();
3525 _obj_arg_handler = lookup(obj_args.sig_cc(), obj_args.has_inline_recv());
3526
3527 CompiledEntrySignature int_args;
3528 SigEntry::add_entry(int_args.sig(), T_INT);
3529 int_args.compute_calling_conventions();
3530 _int_arg_handler = lookup(int_args.sig_cc(), int_args.has_inline_recv());
3531
3532 CompiledEntrySignature obj_int_args;
3533 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
3534 SigEntry::add_entry(obj_int_args.sig(), T_INT);
3535 obj_int_args.compute_calling_conventions();
3536 _obj_int_arg_handler = lookup(obj_int_args.sig_cc(), obj_int_args.has_inline_recv());
3537
3538 CompiledEntrySignature obj_obj_args;
3539 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3540 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3541 obj_obj_args.compute_calling_conventions();
3542 _obj_obj_arg_handler = lookup(obj_obj_args.sig_cc(), obj_obj_args.has_inline_recv());
3543
3544 assert(_no_arg_handler != nullptr &&
3545 _obj_arg_handler != nullptr &&
3546 _int_arg_handler != nullptr &&
3547 _obj_int_arg_handler != nullptr &&
3548 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3549 assert(_no_arg_handler->is_linked() &&
3550 _obj_arg_handler->is_linked() &&
3551 _int_arg_handler->is_linked() &&
3552 _obj_int_arg_handler->is_linked() &&
3553 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3554 }
3555 #endif // INCLUDE_CDS
3556
3557 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3558 LogStreamHandle(Trace, aot) lsh;
3559 if (lsh.is_enabled()) {
3560 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3561 lsh.cr();
3562 }
3563 it->push(&_fingerprint);
3564 }
3565
3566 AdapterHandlerEntry::~AdapterHandlerEntry() {
3567 if (_fingerprint != nullptr) {
3568 AdapterFingerPrint::deallocate(_fingerprint);
3569 _fingerprint = nullptr;
3570 }
3571 if (_sig_cc != nullptr) {
3572 delete _sig_cc;
3573 }
3574 #ifdef ASSERT
3575 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3576 #endif
3577 FreeHeap(this);
3578 }
3579
3580
3581 #ifdef ASSERT
3582 // Capture the code before relocation so that it can be compared
3583 // against other versions. If the code is captured after relocation
3584 // then relative instructions won't be equivalent.
3585 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3586 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3587 _saved_code_length = length;
3588 memcpy(_saved_code, buffer, length);
3589 }
3590
3591
3592 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3593 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3594
3595 if (other->_saved_code_length != _saved_code_length) {
3596 return false;
3597 }
3598
3599 return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
3600 }
3601 #endif
3602
3603
3604 /**
3605 * Create a native wrapper for this native method. The wrapper converts the
3606 * Java-compiled calling convention to the native convention, handles
3607 * arguments, and transitions to native. On return from native code we transition
3608 * back to Java, blocking if a safepoint is in progress.
3609 */
3610 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
3611 ResourceMark rm;
3612 nmethod* nm = nullptr;
3613
3614 // Check if memory should be freed before allocation
3615 CodeCache::gc_on_allocation();
3616
3617 assert(method->is_native(), "must be native");
3618 assert(method->is_special_native_intrinsic() ||
3619 method->has_native_function(), "must have something valid to call!");
3620
3621 {
3622 // Perform the work while holding the lock, but perform any printing outside the lock
3623 MutexLocker mu(AdapterHandlerLibrary_lock);
3624 // See if somebody beat us to it
3625 if (method->code() != nullptr) {
3626 return;
3627 }
3628
3629 const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
3630 assert(compile_id > 0, "Must generate native wrapper");
3631
3632
3633 ResourceMark rm;
3634 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3635 if (buf != nullptr) {
3636 CodeBuffer buffer(buf);
3637
3638 if (method->is_continuation_enter_intrinsic()) {
3639 buffer.initialize_stubs_size(192);
3640 }
3641
3642 struct { double data[20]; } locs_buf;
3643 struct { double data[20]; } stubs_locs_buf;
3644 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3645 #if defined(AARCH64) || defined(PPC64)
3646 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3647 // in the constant pool to ensure ordering between the barrier and oops
3648 // accesses. For native_wrappers we need a constant.
3649 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3650 // static java call that is resolved in the runtime.
3651 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3652 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3653 }
3654 #endif
3655 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3656 MacroAssembler _masm(&buffer);
3657
3658 // Fill in the signature array, for the calling-convention call.
3659 const int total_args_passed = method->size_of_parameters();
3660
3661 BasicType stack_sig_bt[16];
3662 VMRegPair stack_regs[16];
3663 BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3664 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3665
3666 int i = 0;
3667 if (!method->is_static()) { // Pass in receiver first
3668 sig_bt[i++] = T_OBJECT;
3669 }
3670 SignatureStream ss(method->signature());
3671 for (; !ss.at_return_type(); ss.next()) {
3672 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
3673 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3674 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3675 }
3676 }
3677 assert(i == total_args_passed, "");
3678 BasicType ret_type = ss.type();
3679
3680 // Now get the compiled-Java arguments layout.
3681 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3682
3683 // Generate the compiled-to-native wrapper code
3684 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3685
3686 if (nm != nullptr) {
3687 {
3688 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3689 if (nm->make_in_use()) {
3690 method->set_code(method, nm);
3691 }
3692 }
3693
3694 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3695 if (directive->PrintAssemblyOption) {
3696 nm->print_code();
3697 }
3698 DirectivesStack::release(directive);
3699 }
3700 }
3701 } // Unlock AdapterHandlerLibrary_lock
3702
3703
3704 // Install the generated code.
3705 if (nm != nullptr) {
3706 const char *msg = method->is_static() ? "(static)" : "";
3707 CompileTask::print_ul(nm, msg);
3708 if (PrintCompilation) {
3709 ttyLocker ttyl;
3710 CompileTask::print(tty, nm, msg);
3711 }
3712 nm->post_compiled_method_load_event();
3713 }
3714 }
3715
3716 // -------------------------------------------------------------------------
3717 // Java-Java calling convention
3718 // (what you use when Java calls Java)
3719
3720 //------------------------------name_for_receiver----------------------------------
3721 // For a given signature, return the VMReg for parameter 0.
3722 VMReg SharedRuntime::name_for_receiver() {
3723 VMRegPair regs;
3724 BasicType sig_bt = T_OBJECT;
3725 (void) java_calling_convention(&sig_bt, &regs, 1);
3726 // Return argument 0 register. In the LP64 build pointers
3727 // take 2 registers, but the VM wants only the 'main' name.
3728 return regs.first();
3729 }
3730
3731 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
3732 // This method returns a data structure allocated as a
3733 // ResourceObject, so do not put any ResourceMarks in here.
3734
3735 BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
3736 VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
3737 int cnt = 0;
3738 if (has_receiver) {
3739 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
3740 }
3741
3742 for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) {
3743 BasicType type = ss.type();
3744 sig_bt[cnt++] = type;
3745 if (is_double_word_type(type))
3746 sig_bt[cnt++] = T_VOID;
3747 }
3748
3749 if (has_appendix) {
3750 sig_bt[cnt++] = T_OBJECT;
3751 }
3752
3753 assert(cnt < 256, "grow table size");
3754
3755 int comp_args_on_stack;
3756 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt);
3757
3758 // the calling convention doesn't count out_preserve_stack_slots so
3759 // we must add that in to get "true" stack offsets.
3760
3761 if (comp_args_on_stack) {
3762 for (int i = 0; i < cnt; i++) {
3763 VMReg reg1 = regs[i].first();
3764 if (reg1->is_stack()) {
3765 // Yuck
3766 reg1 = reg1->bias(out_preserve_stack_slots());
3767 }
3768 VMReg reg2 = regs[i].second();
3769 if (reg2->is_stack()) {
3770 // Yuck
3771 reg2 = reg2->bias(out_preserve_stack_slots());
3772 }
3773 regs[i].set_pair(reg2, reg1);
3774 }
3775 }
3776
3777 // results
3778 *arg_size = cnt;
3779 return regs;
3780 }
3781
3782 // OSR Migration Code
3783 //
3784 // This code is used to convert interpreter frames into compiled frames. It is
3785 // called from the very start of a compiled OSR nmethod. A temp array is
3786 // allocated to hold the interesting bits of the interpreter frame. All
3787 // active locks are inflated to allow them to move. The displaced headers and
3788 // active interpreter locals are copied into the temp buffer. Then we return
3789 // back to the compiled code. The compiled code then pops the current
3790 // interpreter frame off the stack and pushes a new compiled frame. Then it
3791 // copies the interpreter locals and displaced headers where it wants.
3792 // Finally it calls back to free the temp buffer.
3793 //
3794 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
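//
// The temp buffer built below is laid out as follows (one word per local and
// two words per active monitor):
//   buf[0 .. max_locals-1]        interpreter locals
//   buf[max_locals + 2*k]         BasicLock data for the k-th active monitor
//                                 (the ObjectMonitor cache pointer when
//                                 UseObjectMonitorTable is enabled)
//   buf[max_locals + 2*k + 1]     the k-th locked object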
3795
3796 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
3797 assert(current == JavaThread::current(), "pre-condition");
3798 JFR_ONLY(Jfr::check_and_process_sample_request(current);)
3799 // During OSR migration, we unwind the interpreted frame and replace it with a compiled
3800 // frame. The stack watermark code below ensures that the interpreted frame is processed
3801 // before it gets unwound. This is helpful as the size of the compiled frame could be
3802 // larger than the interpreted frame, which could result in the new frame not being
3803 // processed correctly.
3804 StackWatermarkSet::before_unwind(current);
3805
3806 //
3807 // This code is dependent on the memory layout of the interpreter local
3808 // array and the monitors. On all of our platforms the layout is identical
3809 // so this code is shared. If some platform lays their arrays out
3810 // differently, then this code could move to platform-specific code or
3811 // the code here could be modified to copy items one at a time using
3812 // frame accessor methods and be platform independent.
3813
3814 frame fr = current->last_frame();
3815 assert(fr.is_interpreted_frame(), "");
3816 assert(fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks");
3817
3818 // Figure out how many monitors are active.
3819 int active_monitor_count = 0;
3820 for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
3821 kptr < fr.interpreter_frame_monitor_begin();
3822 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
3823 if (kptr->obj() != nullptr) active_monitor_count++;
3824 }
3825
3826 // QQQ we could place the number of active monitors in the array so that compiled code
3827 // could double-check it.
3828
3829 Method* moop = fr.interpreter_frame_method();
3830 int max_locals = moop->max_locals();
3831 // Allocate temp buffer, 1 word per local & 2 per active monitor
3832 int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size();
3833 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);
3834
3835 // Copy the locals. Order is preserved so that loading of longs works.
3836 // Since there's no GC I can copy the oops blindly.
3837 assert(sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
3838 Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
3839 (HeapWord*)&buf[0],
3840 max_locals);
3841
3842 // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
3843 int i = max_locals;
3844 for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3845 kptr2 < fr.interpreter_frame_monitor_begin();
3846 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3847 if (kptr2->obj() != nullptr) { // Avoid 'holes' in the monitor array
3848 BasicLock *lock = kptr2->lock();
3849 if (UseObjectMonitorTable) {
3850 buf[i] = (intptr_t)lock->object_monitor_cache();
3851 }
3852 #ifdef ASSERT
3853 else {
3854 buf[i] = badDispHeaderOSR;
3855 }
3856 #endif
3857 i++;
3858 buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3859 }
3860 }
3861 assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3862
3863 RegisterMap map(current,
3864 RegisterMap::UpdateMap::skip,
3865 RegisterMap::ProcessFrames::include,
3866 RegisterMap::WalkContinuation::skip);
3867 frame sender = fr.sender(&map);
3868 if (sender.is_interpreted_frame()) {
3869 current->push_cont_fastpath(sender.sp());
3870 }
3871
3872 return buf;
3873 JRT_END
3874
3875 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3876 FREE_C_HEAP_ARRAY(intptr_t, buf);
3877 JRT_END
3878
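// Returns true if the given CodeBlob is one of the adapter blobs, searching both
// the archived (AOT) adapter table and the runtime adapter table.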
3879 bool AdapterHandlerLibrary::contains(const CodeBlob* b) {
3880 bool found = false;
3881 #if INCLUDE_CDS
3882 if (AOTCodeCache::is_using_adapter()) {
3883 auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3884 return (found = (b == CodeCache::find_blob(handler->get_i2c_entry())));
3885 };
3886 _aot_adapter_handler_table.iterate(findblob_archived_table);
3887 }
3888 #endif // INCLUDE_CDS
3889 if (!found) {
3890 auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
3891 return (found = (b == CodeCache::find_blob(a->get_i2c_entry())));
3892 };
3893 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3894 _adapter_handler_table->iterate(findblob_runtime_table);
3895 }
3896 return found;
3897 }
3898
3899 const char* AdapterHandlerLibrary::name(AdapterHandlerEntry* handler) {
3900 return handler->fingerprint()->as_basic_args_string();
3901 }
3902
3903 uint32_t AdapterHandlerLibrary::id(AdapterHandlerEntry* handler) {
3904 return handler->id();
3905 }
3906
3907 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3908 bool found = false;
3909 #if INCLUDE_CDS
3910 if (AOTCodeCache::is_using_adapter()) {
3911 auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3912 if (b == CodeCache::find_blob(handler->get_i2c_entry())) {
3913 found = true;
3914 st->print("Adapter for signature: ");
3915 handler->print_adapter_on(st);
3916 return true;
3917 } else {
3918 return false; // keep looking
3919 }
3920 };
3921 _aot_adapter_handler_table.iterate(findblob_archived_table);
3922 }
3923 #endif // INCLUDE_CDS
3924 if (!found) {
3925 auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
3926 if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3927 found = true;
3928 st->print("Adapter for signature: ");
3929 a->print_adapter_on(st);
3930 return true;
3931 } else {
3932 return false; // keep looking
3933 }
3934 };
3935 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3936 _adapter_handler_table->iterate(findblob_runtime_table);
3937 }
3938 assert(found, "Should have found handler");
3939 }
3940
3941 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3942 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3943 if (adapter_blob() != nullptr) {
3944 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3945 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3946 st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3947 st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3948 st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3949 st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3950 if (get_c2i_no_clinit_check_entry() != nullptr) {
3951 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3952 }
3953 }
3954 st->cr();
3955 }
3956
3957 #ifndef PRODUCT
3958
3959 void AdapterHandlerLibrary::print_statistics() {
3960 print_table_statistics();
3961 }
3962
3963 #endif /* PRODUCT */
3964
3965 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3966 assert(current == JavaThread::current(), "pre-condition");
3967 StackOverflow* overflow_state = current->stack_overflow_state();
3968 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3969 overflow_state->set_reserved_stack_activation(current->stack_base());
3970 JRT_END
3971
3972 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
3973 ResourceMark rm(current);
3974 frame activation;
3975 nmethod* nm = nullptr;
3976 int count = 1;
3977
3978 assert(fr.is_java_frame(), "Must start on Java frame");
3979
3980 RegisterMap map(JavaThread::current(),
3981 RegisterMap::UpdateMap::skip,
3982 RegisterMap::ProcessFrames::skip,
3983 RegisterMap::WalkContinuation::skip); // don't walk continuations
3984 for (; !fr.is_first_frame(); fr = fr.sender(&map)) {
3985 if (!fr.is_java_frame()) {
3986 continue;
3987 }
3988
3989 Method* method = nullptr;
3990 bool found = false;
3991 if (fr.is_interpreted_frame()) {
3992 method = fr.interpreter_frame_method();
3993 if (method != nullptr && method->has_reserved_stack_access()) {
3994 found = true;
3995 }
3996 } else {
3997 CodeBlob* cb = fr.cb();
3998 if (cb != nullptr && cb->is_nmethod()) {
3999 nm = cb->as_nmethod();
4000 method = nm->method();
4001 for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != nullptr; sd = sd->sender()) {
4002 method = sd->method();
4003 if (method != nullptr && method->has_reserved_stack_access()) {
4004 found = true;
4005 }
4006 }
4007 }
4008 }
4009 if (found) {
4010 activation = fr;
4011 warning("Potentially dangerous stack overflow in "
4012 "ReservedStackAccess annotated method %s [%d]",
4013 method->name_and_sig_as_C_string(), count++);
4014 EventReservedStackActivation event;
4015 if (event.should_commit()) {
4016 event.set_method(method);
4017 event.commit();
4018 }
4019 }
4020 }
4021 return activation;
4022 }
4023
4024 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
4025 // After any safepoint, just before going back to compiled code,
4026 // we inform the GC that we will be doing initializing writes to
4027 // this object in the future without emitting card-marks, so
4028 // GC may take any compensating steps.
4029
4030 oop new_obj = current->vm_result_oop();
4031 if (new_obj == nullptr) return;
4032
4033 BarrierSet *bs = BarrierSet::barrier_set();
4034 bs->on_slowpath_allocation_exit(current, new_obj);
4035 }
4036
4037 // We are at a call from compiled code into the interpreter. We need backing
4038 // buffers for all inline type arguments. Allocate an object array to
4039 // hold them (convenient because once we're done with it we don't have
4040 // to worry about freeing it).
4041 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
4042 assert(InlineTypePassFieldsAsArgs, "no reason to call this");
4043 ResourceMark rm;
4044
4045 int nb_slots = 0;
4046 InstanceKlass* holder = callee->method_holder();
4047 allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
4048 if (allocate_receiver) {
4049 nb_slots++;
4050 }
4051 int arg_num = callee->is_static() ? 0 : 1;
4052 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4053 BasicType bt = ss.type();
4054 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4055 nb_slots++;
4056 }
4057 if (bt != T_VOID) {
4058 arg_num++;
4059 }
4060 }
4061 objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
4062 objArrayHandle array(THREAD, array_oop);
4063 arg_num = callee->is_static() ? 0 : 1;
4064 int i = 0;
4065 if (allocate_receiver) {
4066 InlineKlass* vk = InlineKlass::cast(holder);
4067 oop res = vk->allocate_instance(CHECK_NULL);
4068 array->obj_at_put(i++, res);
4069 }
4070 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4071 BasicType bt = ss.type();
4072 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4073 InlineKlass* vk = ss.as_inline_klass(holder);
4074 assert(vk != nullptr, "Unexpected klass");
4075 oop res = vk->allocate_instance(CHECK_NULL);
4076 array->obj_at_put(i++, res);
4077 }
4078 if (bt != T_VOID) {
4079 arg_num++;
4080 }
4081 }
4082 return array();
4083 }
4084
4085 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
4086 methodHandle callee(current, callee_method);
4087 oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
4088 current->set_vm_result_oop(array);
4089 current->set_vm_result_metadata(callee()); // TODO: required to keep callee live?
4090 JRT_END
4091
4092 // We're returning from an interpreted method: load each field into a
4093 // register following the calling convention
4094 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
4095 {
4096 assert(res->klass()->is_inline_klass(), "only inline types here");
4097 ResourceMark rm;
4098 RegisterMap reg_map(current,
4099 RegisterMap::UpdateMap::include,
4100 RegisterMap::ProcessFrames::include,
4101 RegisterMap::WalkContinuation::skip);
4102 frame stubFrame = current->last_frame();
4103 frame callerFrame = stubFrame.sender(&reg_map);
4104 assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
4105
4106 InlineKlass* vk = InlineKlass::cast(res->klass());
4107
4108 const Array<SigEntry>* sig_vk = vk->extended_sig();
4109 const Array<VMRegPair>* regs = vk->return_regs();
4110
4111 if (regs == nullptr) {
4112 // The fields of the inline klass don't fit in registers, bail out
4113 return;
4114 }
4115
4116 int j = 1;
4117 for (int i = 0; i < sig_vk->length(); i++) {
4118 BasicType bt = sig_vk->at(i)._bt;
4119 if (bt == T_METADATA) {
4120 continue;
4121 }
4122 if (bt == T_VOID) {
4123 if (sig_vk->at(i-1)._bt == T_LONG ||
4124 sig_vk->at(i-1)._bt == T_DOUBLE) {
4125 j++;
4126 }
4127 continue;
4128 }
4129 int off = sig_vk->at(i)._offset;
4130 assert(off > 0, "offset in object should be positive");
4131 VMRegPair pair = regs->at(j);
4132 address loc = reg_map.location(pair.first(), nullptr);
4133 switch(bt) {
4134 case T_BOOLEAN:
4135 *(jboolean*)loc = res->bool_field(off);
4136 break;
4137 case T_CHAR:
4138 *(jchar*)loc = res->char_field(off);
4139 break;
4140 case T_BYTE:
4141 *(jbyte*)loc = res->byte_field(off);
4142 break;
4143 case T_SHORT:
4144 *(jshort*)loc = res->short_field(off);
4145 break;
4146 case T_INT: {
4147 *(jint*)loc = res->int_field(off);
4148 break;
4149 }
4150 case T_LONG:
4151 #ifdef _LP64
4152 *(intptr_t*)loc = res->long_field(off);
4153 #else
4154 Unimplemented();
4155 #endif
4156 break;
4157 case T_OBJECT:
4158 case T_ARRAY: {
4159 *(oop*)loc = res->obj_field(off);
4160 break;
4161 }
4162 case T_FLOAT:
4163 *(jfloat*)loc = res->float_field(off);
4164 break;
4165 case T_DOUBLE:
4166 *(jdouble*)loc = res->double_field(off);
4167 break;
4168 default:
4169 ShouldNotReachHere();
4170 }
4171 j++;
4172 }
4173 assert(j == regs->length(), "missed a field?");
4174
4175 #ifdef ASSERT
4176 VMRegPair pair = regs->at(0);
4177 address loc = reg_map.location(pair.first(), nullptr);
4178 assert(*(oopDesc**)loc == res, "overwritten object");
4179 #endif
4180
4181 current->set_vm_result_oop(res);
4182 }
4183 JRT_END
4184
4185 // We've returned to an interpreted method; the interpreter needs a
4186 // reference to an inline type instance. Allocate it and initialize it
4187 // from the field values in registers.
4188 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
4189 {
4190 ResourceMark rm;
4191 RegisterMap reg_map(current,
4192 RegisterMap::UpdateMap::include,
4193 RegisterMap::ProcessFrames::include,
4194 RegisterMap::WalkContinuation::skip);
4195 frame stubFrame = current->last_frame();
4196 frame callerFrame = stubFrame.sender(&reg_map);
4197
4198 #ifdef ASSERT
4199 InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
4200 #endif
4201
4202 if (!is_set_nth_bit(res, 0)) {
4203 // We're not returning with inline type fields in registers (the
4204 // calling convention didn't allow it for this inline klass)
4205 assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
4206 current->set_vm_result_oop((oopDesc*)res);
4207 assert(verif_vk == nullptr, "broken calling convention");
4208 return;
4209 }
4210
4211 clear_nth_bit(res, 0);
4212 InlineKlass* vk = (InlineKlass*)res;
4213 assert(verif_vk == vk, "broken calling convention");
4214 assert(Metaspace::contains((void*)res), "should be klass");
4215
4216 // Allocate handles for every oop field so they are safe in case of
4217 // a safepoint when allocating
4218 GrowableArray<Handle> handles;
4219 vk->save_oop_fields(reg_map, handles);
4220
4221 // It's unsafe to safepoint until we are here
4222 JRT_BLOCK;
4223 {
4224 JavaThread* THREAD = current;
4225 oop vt = vk->realloc_result(reg_map, handles, CHECK);
4226 current->set_vm_result_oop(vt);
4227 }
4228 JRT_BLOCK_END;
4229 }
4230 JRT_END